Add BufFileRead variants with short read and EOF detection
Most callers of BufFileRead() want to check whether they read the full
specified length. Checking this at every call site is very tedious.
This patch provides additional variants BufFileReadExact() and
BufFileReadMaybeEOF() that include the length checks.
I considered changing BufFileRead() itself, but this function is also
used in extensions, and so changing the behavior like this would create
a lot of problems there. The new names are analogous to the existing
LogicalTapeReadExact().
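For illustration, here is a minimal sketch (schematic, not taken from any particular
caller) of how a typical call site changes with the proposed variants:

    int     len;
    size_t  nread;

    /* old style: the caller has to verify the byte count itself */
    nread = BufFileRead(file, &len, sizeof(len));
    if (nread != sizeof(len))
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not read from temporary file: read only %zu of %zu bytes",
                        nread, sizeof(len))));

    /* new style: the variant raises the error on a short read by itself */
    BufFileReadExact(file, &len, sizeof(len));

    /* new style where hitting end of file is legitimate: returns 0 at EOF */
    nread = BufFileReadMaybeEOF(file, &len, sizeof(len), true);
    if (nread == 0)
    {
        /* reached end of file; caller-specific handling goes here */
    }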
Attachments:
0001-Add-BufFileRead-variants-with-short-read-and-EOF-det.patch (text/plain)
From c40ee920f931d42b69338c777639200bafbee805 Mon Sep 17 00:00:00 2001
From: Peter Eisentraut <peter@eisentraut.org>
Date: Wed, 28 Dec 2022 11:46:14 +0100
Subject: [PATCH] Add BufFileRead variants with short read and EOF detection
Most callers of BufFileRead() want to check whether they read the full
specified length. Checking this at every call site is very tedious.
This patch provides additional variants BufFileReadExact() and
BufFileReadMaybeEOF() that include the length checks.
I considered changing BufFileRead() itself, but this function is also
used in extensions, and so changing the behavior like this would
create a lot of problems there. The new names are analogous to the
existing LogicalTapeReadExact().
---
src/backend/access/gist/gistbuildbuffers.c | 7 +---
src/backend/backup/backup_manifest.c | 8 +---
src/backend/executor/nodeHashjoin.c | 18 ++------
src/backend/replication/logical/worker.c | 29 +++----------
src/backend/storage/file/buffile.c | 47 +++++++++++++++++++--
src/backend/utils/sort/logtape.c | 9 +---
src/backend/utils/sort/sharedtuplestore.c | 49 +++-------------------
src/backend/utils/sort/tuplestore.c | 29 +++----------
src/include/storage/buffile.h | 4 +-
9 files changed, 71 insertions(+), 129 deletions(-)
diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c
index 538e3880c9..bef98b292d 100644
--- a/src/backend/access/gist/gistbuildbuffers.c
+++ b/src/backend/access/gist/gistbuildbuffers.c
@@ -753,14 +753,9 @@ gistRelocateBuildBuffersOnSplit(GISTBuildBuffers *gfbb, GISTSTATE *giststate,
static void
ReadTempFileBlock(BufFile *file, long blknum, void *ptr)
{
- size_t nread;
-
if (BufFileSeekBlock(file, blknum) != 0)
elog(ERROR, "could not seek to block %ld in temporary file", blknum);
- nread = BufFileRead(file, ptr, BLCKSZ);
- if (nread != BLCKSZ)
- elog(ERROR, "could not read temporary file: read only %zu of %zu bytes",
- nread, (size_t) BLCKSZ);
+ BufFileReadExact(file, ptr, BLCKSZ);
}
static void
diff --git a/src/backend/backup/backup_manifest.c b/src/backend/backup/backup_manifest.c
index a54185fdab..ae2077794f 100644
--- a/src/backend/backup/backup_manifest.c
+++ b/src/backend/backup/backup_manifest.c
@@ -362,16 +362,10 @@ SendBackupManifest(backup_manifest_info *manifest, bbsink *sink)
while (manifest_bytes_done < manifest->manifest_size)
{
size_t bytes_to_read;
- size_t rc;
bytes_to_read = Min(sink->bbs_buffer_length,
manifest->manifest_size - manifest_bytes_done);
- rc = BufFileRead(manifest->buffile, sink->bbs_buffer,
- bytes_to_read);
- if (rc != bytes_to_read)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from temporary file: %m")));
+ BufFileReadExact(manifest->buffile, sink->bbs_buffer, bytes_to_read);
bbsink_manifest_contents(sink, bytes_to_read);
manifest_bytes_done += bytes_to_read;
}
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index 3e1a997f92..605e12fe8c 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -1260,28 +1260,18 @@ ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
* we can read them both in one BufFileRead() call without any type
* cheating.
*/
- nread = BufFileRead(file, header, sizeof(header));
+ nread = BufFileReadMaybeEOF(file, header, sizeof(header), true);
if (nread == 0) /* end of file */
{
ExecClearTuple(tupleSlot);
return NULL;
}
- if (nread != sizeof(header))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from hash-join temporary file: read only %zu of %zu bytes",
- nread, sizeof(header))));
*hashvalue = header[0];
tuple = (MinimalTuple) palloc(header[1]);
tuple->t_len = header[1];
- nread = BufFileRead(file,
- ((char *) tuple + sizeof(uint32)),
- header[1] - sizeof(uint32));
- if (nread != header[1] - sizeof(uint32))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from hash-join temporary file: read only %zu of %zu bytes",
- nread, header[1] - sizeof(uint32))));
+ BufFileReadExact(file,
+ (char *) tuple + sizeof(uint32),
+ header[1] - sizeof(uint32));
ExecForceStoreMinimalTuple(tuple, tupleSlot, true);
return tupleSlot;
}
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index 96772e4d73..a51909bbad 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -1432,19 +1432,13 @@ apply_spooled_messages(TransactionId xid, XLogRecPtr lsn)
CHECK_FOR_INTERRUPTS();
/* read length of the on-disk record */
- nbytes = BufFileRead(fd, &len, sizeof(len));
+ nbytes = BufFileReadMaybeEOF(fd, &len, sizeof(len), true);
/* have we reached end of the file? */
if (nbytes == 0)
break;
/* do we have a correct length? */
- if (nbytes != sizeof(len))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from streaming transaction's changes file \"%s\": %m",
- path)));
-
if (len <= 0)
elog(ERROR, "incorrect length %d in streaming transaction's changes file \"%s\"",
len, path);
@@ -1453,11 +1447,7 @@ apply_spooled_messages(TransactionId xid, XLogRecPtr lsn)
buffer = repalloc(buffer, len);
/* and finally read the data into the buffer */
- if (BufFileRead(fd, buffer, len) != len)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from streaming transaction's changes file \"%s\": %m",
- path)));
+ BufFileReadExact(fd, buffer, len);
/* copy the buffer to the stringinfo and call apply_dispatch */
resetStringInfo(&s2);
@@ -3245,13 +3235,7 @@ subxact_info_read(Oid subid, TransactionId xid)
return;
/* read number of subxact items */
- if (BufFileRead(fd, &subxact_data.nsubxacts,
- sizeof(subxact_data.nsubxacts)) !=
- sizeof(subxact_data.nsubxacts))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from streaming transaction's subxact file \"%s\": %m",
- path)));
+ BufFileReadExact(fd, &subxact_data.nsubxacts, sizeof(subxact_data.nsubxacts));
len = sizeof(SubXactInfo) * subxact_data.nsubxacts;
@@ -3269,11 +3253,8 @@ subxact_info_read(Oid subid, TransactionId xid)
sizeof(SubXactInfo));
MemoryContextSwitchTo(oldctx);
- if ((len > 0) && ((BufFileRead(fd, subxact_data.subxacts, len)) != len))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from streaming transaction's subxact file \"%s\": %m",
- path)));
+ if (len > 0)
+ BufFileReadExact(fd, subxact_data.subxacts, len);
BufFileClose(fd);
}
diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c
index b0b4eeb3bd..61a47bcd73 100644
--- a/src/backend/storage/file/buffile.c
+++ b/src/backend/storage/file/buffile.c
@@ -573,14 +573,19 @@ BufFileDumpBuffer(BufFile *file)
}
/*
- * BufFileRead
+ * BufFileRead variants
*
* Like fread() except we assume 1-byte element size and report I/O errors via
* ereport().
+ *
+ * If 'exact' is true, then an error is also raised if the number of bytes
+ * read is not exactly 'size' (no short reads). If 'exact' and 'eofOK' are
+ * true, then reading zero bytes is ok.
*/
-size_t
-BufFileRead(BufFile *file, void *ptr, size_t size)
+static size_t
+BufFileReadCommon(BufFile *file, void *ptr, size_t size, bool exact, bool eofOK)
{
+ size_t start_size = size;
size_t nread = 0;
size_t nthistime;
@@ -612,9 +617,45 @@ BufFileRead(BufFile *file, void *ptr, size_t size)
nread += nthistime;
}
+ if (exact &&
+ (nread != start_size && !(nread == 0 && eofOK)))
+ ereport(ERROR,
+ errcode_for_file_access(),
+ errmsg("could not read from temporary file: read only %zu of %zu bytes",
+ nread, start_size));
+
return nread;
}
+/*
+ * Legacy interface where the caller needs to check for end of file or short
+ * reads.
+ */
+size_t
+BufFileRead(BufFile *file, void *ptr, size_t size)
+{
+ return BufFileReadCommon(file, ptr, size, false, false);
+}
+
+/*
+ * Require read of exactly the specified size.
+ */
+void
+BufFileReadExact(BufFile *file, void *ptr, size_t size)
+{
+ BufFileReadCommon(file, ptr, size, true, false);
+}
+
+/*
+ * Require read of exactly the specified size, but optionally allow end of
+ * file (in which case 0 is returned).
+ */
+size_t
+BufFileReadMaybeEOF(BufFile *file, void *ptr, size_t size, bool eofOK)
+{
+ return BufFileReadCommon(file, ptr, size, true, eofOK);
+}
+
/*
* BufFileWrite
*
diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c
index c384f98e13..48445bc97c 100644
--- a/src/backend/utils/sort/logtape.c
+++ b/src/backend/utils/sort/logtape.c
@@ -281,19 +281,12 @@ ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
static void
ltsReadBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
{
- size_t nread;
-
if (BufFileSeekBlock(lts->pfile, blocknum) != 0)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not seek to block %ld of temporary file",
blocknum)));
- nread = BufFileRead(lts->pfile, buffer, BLCKSZ);
- if (nread != BLCKSZ)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read block %ld of temporary file: read only %zu of %zu bytes",
- blocknum, nread, (size_t) BLCKSZ)));
+ BufFileReadExact(lts->pfile, buffer, BLCKSZ);
}
/*
diff --git a/src/backend/utils/sort/sharedtuplestore.c b/src/backend/utils/sort/sharedtuplestore.c
index 996cef07d4..45152061dd 100644
--- a/src/backend/utils/sort/sharedtuplestore.c
+++ b/src/backend/utils/sort/sharedtuplestore.c
@@ -422,23 +422,10 @@ sts_read_tuple(SharedTuplestoreAccessor *accessor, void *meta_data)
*/
if (accessor->sts->meta_data_size > 0)
{
- if (BufFileRead(accessor->read_file,
- meta_data,
- accessor->sts->meta_data_size) !=
- accessor->sts->meta_data_size)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from shared tuplestore temporary file"),
- errdetail_internal("Short read while reading meta-data.")));
+ BufFileReadExact(accessor->read_file, meta_data, accessor->sts->meta_data_size);
accessor->read_bytes += accessor->sts->meta_data_size;
}
- if (BufFileRead(accessor->read_file,
- &size,
- sizeof(size)) != sizeof(size))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from shared tuplestore temporary file"),
- errdetail_internal("Short read while reading size.")));
+ BufFileReadExact(accessor->read_file, &size, sizeof(size));
accessor->read_bytes += sizeof(size);
if (size > accessor->read_buffer_size)
{
@@ -455,13 +442,7 @@ sts_read_tuple(SharedTuplestoreAccessor *accessor, void *meta_data)
this_chunk_size = Min(remaining_size,
BLCKSZ * STS_CHUNK_PAGES - accessor->read_bytes);
destination = accessor->read_buffer + sizeof(uint32);
- if (BufFileRead(accessor->read_file,
- destination,
- this_chunk_size) != this_chunk_size)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from shared tuplestore temporary file"),
- errdetail_internal("Short read while reading tuple.")));
+ BufFileReadExact(accessor->read_file, destination, this_chunk_size);
accessor->read_bytes += this_chunk_size;
remaining_size -= this_chunk_size;
destination += this_chunk_size;
@@ -473,12 +454,7 @@ sts_read_tuple(SharedTuplestoreAccessor *accessor, void *meta_data)
/* We are now positioned at the start of an overflow chunk. */
SharedTuplestoreChunk chunk_header;
- if (BufFileRead(accessor->read_file, &chunk_header, STS_CHUNK_HEADER_SIZE) !=
- STS_CHUNK_HEADER_SIZE)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from shared tuplestore temporary file"),
- errdetail_internal("Short read while reading overflow chunk header.")));
+ BufFileReadExact(accessor->read_file, &chunk_header, STS_CHUNK_HEADER_SIZE);
accessor->read_bytes = STS_CHUNK_HEADER_SIZE;
if (chunk_header.overflow == 0)
ereport(ERROR,
@@ -489,13 +465,7 @@ sts_read_tuple(SharedTuplestoreAccessor *accessor, void *meta_data)
this_chunk_size = Min(remaining_size,
BLCKSZ * STS_CHUNK_PAGES -
STS_CHUNK_HEADER_SIZE);
- if (BufFileRead(accessor->read_file,
- destination,
- this_chunk_size) != this_chunk_size)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from shared tuplestore temporary file"),
- errdetail_internal("Short read while reading tuple.")));
+ BufFileReadExact(accessor->read_file, destination, this_chunk_size);
accessor->read_bytes += this_chunk_size;
remaining_size -= this_chunk_size;
destination += this_chunk_size;
@@ -551,7 +521,6 @@ sts_parallel_scan_next(SharedTuplestoreAccessor *accessor, void *meta_data)
if (!eof)
{
SharedTuplestoreChunk chunk_header;
- size_t nread;
/* Make sure we have the file open. */
if (accessor->read_file == NULL)
@@ -570,13 +539,7 @@ sts_parallel_scan_next(SharedTuplestoreAccessor *accessor, void *meta_data)
(errcode_for_file_access(),
errmsg("could not seek to block %u in shared tuplestore temporary file",
read_page)));
- nread = BufFileRead(accessor->read_file, &chunk_header,
- STS_CHUNK_HEADER_SIZE);
- if (nread != STS_CHUNK_HEADER_SIZE)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from shared tuplestore temporary file: read only %zu of %zu bytes",
- nread, STS_CHUNK_HEADER_SIZE)));
+ BufFileReadExact(accessor->read_file, &chunk_header, STS_CHUNK_HEADER_SIZE);
/*
* If this is an overflow chunk, we skip it and any following
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index cc884ab116..6b4e7e4051 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -1468,15 +1468,11 @@ getlen(Tuplestorestate *state, bool eofOK)
unsigned int len;
size_t nbytes;
- nbytes = BufFileRead(state->myfile, &len, sizeof(len));
- if (nbytes == sizeof(len))
+ nbytes = BufFileReadMaybeEOF(state->myfile, &len, sizeof(len), eofOK);
+ if (nbytes == 0)
+ return 0;
+ else
return len;
- if (nbytes != 0 || !eofOK)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from tuplestore temporary file: read only %zu of %zu bytes",
- nbytes, sizeof(len))));
- return 0;
}
@@ -1528,25 +1524,12 @@ readtup_heap(Tuplestorestate *state, unsigned int len)
unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
MinimalTuple tuple = (MinimalTuple) palloc(tuplen);
char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
- size_t nread;
USEMEM(state, GetMemoryChunkSpace(tuple));
/* read in the tuple proper */
tuple->t_len = tuplen;
- nread = BufFileRead(state->myfile, tupbody, tupbodylen);
- if (nread != (size_t) tupbodylen)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from tuplestore temporary file: read only %zu of %zu bytes",
- nread, (size_t) tupbodylen)));
+ BufFileReadExact(state->myfile, tupbody, tupbodylen);
if (state->backward) /* need trailing length word? */
- {
- nread = BufFileRead(state->myfile, &tuplen, sizeof(tuplen));
- if (nread != sizeof(tuplen))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from tuplestore temporary file: read only %zu of %zu bytes",
- nread, sizeof(tuplen))));
- }
+ BufFileReadExact(state->myfile, &tuplen, sizeof(tuplen));
return (void *) tuple;
}
diff --git a/src/include/storage/buffile.h b/src/include/storage/buffile.h
index a4922d1853..4b8d53a59e 100644
--- a/src/include/storage/buffile.h
+++ b/src/include/storage/buffile.h
@@ -38,7 +38,9 @@ typedef struct BufFile BufFile;
extern BufFile *BufFileCreateTemp(bool interXact);
extern void BufFileClose(BufFile *file);
-extern size_t BufFileRead(BufFile *file, void *ptr, size_t size);
+extern pg_nodiscard size_t BufFileRead(BufFile *file, void *ptr, size_t size);
+extern void BufFileReadExact(BufFile *file, void *ptr, size_t size);
+extern size_t BufFileReadMaybeEOF(BufFile *file, void *ptr, size_t size, bool eofOK);
extern void BufFileWrite(BufFile *file, void *ptr, size_t size);
extern int BufFileSeek(BufFile *file, int fileno, off_t offset, int whence);
extern void BufFileTell(BufFile *file, int *fileno, off_t *offset);
base-commit: adb5c32eb53e1ffdc5c954aafcc5bc9ed93f3de6
--
2.39.0
On Wed, Dec 28, 2022 at 4:17 PM Peter Eisentraut
<peter.eisentraut@enterprisedb.com> wrote:
Most callers of BufFileRead() want to check whether they read the full
specified length. Checking this at every call site is very tedious.
This patch provides additional variants BufFileReadExact() and
BufFileReadMaybeEOF() that include the length checks.
I considered changing BufFileRead() itself, but this function is also
used in extensions, and so changing the behavior like this would create
a lot of problems there. The new names are analogous to the existing
LogicalTapeReadExact().
+1 for the new APIs. I have noticed that some of the existing places
use %m and the file path in messages which are not used in the new
common function.
--
With Regards,
Amit Kapila.
On Wed, 28 Dec 2022 at 16:17, Peter Eisentraut
<peter.eisentraut@enterprisedb.com> wrote:
Most callers of BufFileRead() want to check whether they read the full
specified length. Checking this at every call site is very tedious.
This patch provides additional variants BufFileReadExact() and
BufFileReadMaybeEOF() that include the length checks.
I considered changing BufFileRead() itself, but this function is also
used in extensions, and so changing the behavior like this would create
a lot of problems there. The new names are analogous to the existing
LogicalTapeReadExact().
The patch does not apply on top of HEAD as in [1], please post a rebased patch:
=== Applying patches on top of PostgreSQL commit ID
e351f85418313e97c203c73181757a007dfda6d0 ===
=== applying patch
./0001-Add-BufFileRead-variants-with-short-read-and-EOF-det.patch
patching file src/backend/access/gist/gistbuildbuffers.c
...
Hunk #1 FAILED at 38.
1 out of 1 hunk FAILED -- saving rejects to file
src/include/storage/buffile.h.rej
[1]: http://cfbot.cputube.org/patch_41_4089.log
Regards,
Vignesh
On 02.01.23 13:13, Amit Kapila wrote:
On Wed, Dec 28, 2022 at 4:17 PM Peter Eisentraut
<peter.eisentraut@enterprisedb.com> wrote:
Most callers of BufFileRead() want to check whether they read the full
specified length. Checking this at every call site is very tedious.
This patch provides additional variants BufFileReadExact() and
BufFileReadMaybeEOF() that include the length checks.
I considered changing BufFileRead() itself, but this function is also
used in extensions, and so changing the behavior like this would create
a lot of problems there. The new names are analogous to the existing
LogicalTapeReadExact().
+1 for the new APIs. I have noticed that some of the existing places
use %m and the file path in messages which are not used in the new
common function.
The existing uses of %m are wrong. This was already fixed once in
7897e3bb902c557412645b82120f4d95f7474906, but the affected areas of code
were apparently developed at around the same time and didn't get the
fix. So I have attached a separate patch to fix this first, which could
be backpatched.
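To make the problem concrete, here is a schematic example of the pattern being fixed
(not copied from any one caller): after a short read, BufFileRead() has not failed at the
OS level, so errno is typically not set and %m reports nothing useful.

    /* broken: errno is not meaningful after a short read */
    if (BufFileRead(fd, &len, sizeof(len)) != sizeof(len))
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not read from file \"%s\": %m", path)));

    /* fixed: report the byte counts instead */
    nread = BufFileRead(fd, &len, sizeof(len));
    if (nread != sizeof(len))
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not read from file \"%s\": read only %zu of %zu bytes",
                        path, nread, sizeof(len))));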
The original patch is then rebased on top of that. I have adjusted the
error message to include the file set name if available.
What this doesn't keep is the purpose of the temp file in some cases,
like "hash-join temporary file". We could maybe make this an additional
argument or an error context, but it seems cumbersome in any case.
Thoughts?
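For what it's worth, a purely hypothetical sketch of the error-context variant (names
invented here, not part of the attached patches) could look like this:

    static void
    temp_file_read_error_callback(void *arg)
    {
        errcontext("reading from %s", (const char *) arg);
    }

    /* at the call site: */
    ErrorContextCallback errcallback;

    errcallback.callback = temp_file_read_error_callback;
    errcallback.arg = (void *) "hash-join temporary file";
    errcallback.previous = error_context_stack;
    error_context_stack = &errcallback;

    BufFileReadExact(file, ptr, size);

    error_context_stack = errcallback.previous;

That would keep the purpose in the report, but setting it up at every call site is
arguably no less cumbersome than the explicit length checks the patch removes.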
Attachments:
v2-0001-Fix-some-BufFileRead-error-reporting.patch (text/plain)
From fca7c4724e2dd4cd3edd1e59275e6b6ca2448bb3 Mon Sep 17 00:00:00 2001
From: Peter Eisentraut <peter@eisentraut.org>
Date: Fri, 6 Jan 2023 11:57:35 +0100
Subject: [PATCH v2 1/2] Fix some BufFileRead() error reporting
Remove "%m" from error messages where errno would be bogus. Add short
read byte counts where appropriate.
This is equivalent to what was done in
7897e3bb902c557412645b82120f4d95f7474906, but some code was apparently
developed concurrently to that and not updated accordingly.
---
src/backend/backup/backup_manifest.c | 3 ++-
src/backend/replication/logical/worker.c | 33 ++++++++++++++----------
2 files changed, 21 insertions(+), 15 deletions(-)
diff --git a/src/backend/backup/backup_manifest.c b/src/backend/backup/backup_manifest.c
index 21ca2941dc..d325ef047a 100644
--- a/src/backend/backup/backup_manifest.c
+++ b/src/backend/backup/backup_manifest.c
@@ -371,7 +371,8 @@ SendBackupManifest(backup_manifest_info *manifest, bbsink *sink)
if (rc != bytes_to_read)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read from temporary file: %m")));
+ errmsg("could not read from temporary file: read only %zu of %zu bytes",
+ rc, bytes_to_read)));
bbsink_manifest_contents(sink, bytes_to_read);
manifest_bytes_done += bytes_to_read;
}
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index f8e8cf71eb..64ce93e725 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -1426,7 +1426,7 @@ apply_spooled_messages(TransactionId xid, XLogRecPtr lsn)
nchanges = 0;
while (true)
{
- int nbytes;
+ size_t nbytes;
int len;
CHECK_FOR_INTERRUPTS();
@@ -1442,8 +1442,8 @@ apply_spooled_messages(TransactionId xid, XLogRecPtr lsn)
if (nbytes != sizeof(len))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read from streaming transaction's changes file \"%s\": %m",
- path)));
+ errmsg("could not read from streaming transaction's changes file \"%s\": read only %zu of %zu bytes",
+ path, nbytes, sizeof(len))));
if (len <= 0)
elog(ERROR, "incorrect length %d in streaming transaction's changes file \"%s\"",
@@ -1453,11 +1453,12 @@ apply_spooled_messages(TransactionId xid, XLogRecPtr lsn)
buffer = repalloc(buffer, len);
/* and finally read the data into the buffer */
- if (BufFileRead(fd, buffer, len) != len)
+ nbytes = BufFileRead(fd, buffer, len);
+ if (nbytes != len)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read from streaming transaction's changes file \"%s\": %m",
- path)));
+ errmsg("could not read from streaming transaction's changes file \"%s\": read only %zu of %zu bytes",
+ path, nbytes, (size_t) len)));
/* copy the buffer to the stringinfo and call apply_dispatch */
resetStringInfo(&s2);
@@ -3221,6 +3222,7 @@ static void
subxact_info_read(Oid subid, TransactionId xid)
{
char path[MAXPGPATH];
+ size_t nread;
Size len;
BufFile *fd;
MemoryContext oldctx;
@@ -3240,13 +3242,12 @@ subxact_info_read(Oid subid, TransactionId xid)
return;
/* read number of subxact items */
- if (BufFileRead(fd, &subxact_data.nsubxacts,
- sizeof(subxact_data.nsubxacts)) !=
- sizeof(subxact_data.nsubxacts))
+ nread = BufFileRead(fd, &subxact_data.nsubxacts, sizeof(subxact_data.nsubxacts));
+ if (nread != sizeof(subxact_data.nsubxacts))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read from streaming transaction's subxact file \"%s\": %m",
- path)));
+ errmsg("could not read from streaming transaction's subxact file \"%s\": read only %zu of %zu bytes",
+ path, nread, sizeof(subxact_data.nsubxacts))));
len = sizeof(SubXactInfo) * subxact_data.nsubxacts;
@@ -3264,11 +3265,15 @@ subxact_info_read(Oid subid, TransactionId xid)
sizeof(SubXactInfo));
MemoryContextSwitchTo(oldctx);
- if ((len > 0) && ((BufFileRead(fd, subxact_data.subxacts, len)) != len))
+ if (len > 0)
+ {
+ nread = BufFileRead(fd, subxact_data.subxacts, len);
+ if (nread != len)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read from streaming transaction's subxact file \"%s\": %m",
- path)));
+ errmsg("could not read from streaming transaction's subxact file \"%s\": read only %zu of %zu bytes",
+ path, nread, len)));
+ }
BufFileClose(fd);
}
base-commit: 72aea955d49712a17c08748aa9abcbcf98c32fc5
--
2.39.0
v2-0002-Add-BufFileRead-variants-with-short-read-and-EOF-.patch (text/plain)
From 34f1df4fbd6dda2e3b0a434e4aa4a080f52799c8 Mon Sep 17 00:00:00 2001
From: Peter Eisentraut <peter@eisentraut.org>
Date: Fri, 6 Jan 2023 12:13:49 +0100
Subject: [PATCH v2 2/2] Add BufFileRead variants with short read and EOF
detection
Most callers of BufFileRead() want to check whether they read the full
specified length. Checking this at every call site is very tedious.
This patch provides additional variants BufFileReadExact() and
BufFileReadMaybeEOF() that include the length checks.
I considered changing BufFileRead() itself, but this function is also
used in extensions, and so changing the behavior like this would
create a lot of problems there. The new names are analogous to the
existing LogicalTapeReadExact().
Discussion: https://www.postgresql.org/message-id/flat/f3501945-c591-8cc3-5ef0-b72a2e0eaa9c@enterprisedb.com
---
src/backend/access/gist/gistbuildbuffers.c | 7 +--
src/backend/backup/backup_manifest.c | 9 +---
src/backend/executor/nodeHashjoin.c | 18 ++------
src/backend/replication/logical/worker.c | 32 ++------------
src/backend/storage/file/buffile.c | 50 ++++++++++++++++++++--
src/backend/utils/sort/logtape.c | 9 +---
src/backend/utils/sort/sharedtuplestore.c | 49 +++------------------
src/backend/utils/sort/tuplestore.c | 29 +++----------
src/include/storage/buffile.h | 4 +-
9 files changed, 73 insertions(+), 134 deletions(-)
diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c
index 1a732dd5cf..3399a6ae68 100644
--- a/src/backend/access/gist/gistbuildbuffers.c
+++ b/src/backend/access/gist/gistbuildbuffers.c
@@ -753,14 +753,9 @@ gistRelocateBuildBuffersOnSplit(GISTBuildBuffers *gfbb, GISTSTATE *giststate,
static void
ReadTempFileBlock(BufFile *file, long blknum, void *ptr)
{
- size_t nread;
-
if (BufFileSeekBlock(file, blknum) != 0)
elog(ERROR, "could not seek to block %ld in temporary file", blknum);
- nread = BufFileRead(file, ptr, BLCKSZ);
- if (nread != BLCKSZ)
- elog(ERROR, "could not read temporary file: read only %zu of %zu bytes",
- nread, (size_t) BLCKSZ);
+ BufFileReadExact(file, ptr, BLCKSZ);
}
static void
diff --git a/src/backend/backup/backup_manifest.c b/src/backend/backup/backup_manifest.c
index d325ef047a..fabd2ca299 100644
--- a/src/backend/backup/backup_manifest.c
+++ b/src/backend/backup/backup_manifest.c
@@ -362,17 +362,10 @@ SendBackupManifest(backup_manifest_info *manifest, bbsink *sink)
while (manifest_bytes_done < manifest->manifest_size)
{
size_t bytes_to_read;
- size_t rc;
bytes_to_read = Min(sink->bbs_buffer_length,
manifest->manifest_size - manifest_bytes_done);
- rc = BufFileRead(manifest->buffile, sink->bbs_buffer,
- bytes_to_read);
- if (rc != bytes_to_read)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from temporary file: read only %zu of %zu bytes",
- rc, bytes_to_read)));
+ BufFileReadExact(manifest->buffile, sink->bbs_buffer, bytes_to_read);
bbsink_manifest_contents(sink, bytes_to_read);
manifest_bytes_done += bytes_to_read;
}
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index 9dbbd7f8c3..b215e3f59a 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -1260,28 +1260,18 @@ ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
* we can read them both in one BufFileRead() call without any type
* cheating.
*/
- nread = BufFileRead(file, header, sizeof(header));
+ nread = BufFileReadMaybeEOF(file, header, sizeof(header), true);
if (nread == 0) /* end of file */
{
ExecClearTuple(tupleSlot);
return NULL;
}
- if (nread != sizeof(header))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from hash-join temporary file: read only %zu of %zu bytes",
- nread, sizeof(header))));
*hashvalue = header[0];
tuple = (MinimalTuple) palloc(header[1]);
tuple->t_len = header[1];
- nread = BufFileRead(file,
- ((char *) tuple + sizeof(uint32)),
- header[1] - sizeof(uint32));
- if (nread != header[1] - sizeof(uint32))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from hash-join temporary file: read only %zu of %zu bytes",
- nread, header[1] - sizeof(uint32))));
+ BufFileReadExact(file,
+ (char *) tuple + sizeof(uint32),
+ header[1] - sizeof(uint32));
ExecForceStoreMinimalTuple(tuple, tupleSlot, true);
return tupleSlot;
}
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index 64ce93e725..859cb3d4c6 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -1432,19 +1432,13 @@ apply_spooled_messages(TransactionId xid, XLogRecPtr lsn)
CHECK_FOR_INTERRUPTS();
/* read length of the on-disk record */
- nbytes = BufFileRead(fd, &len, sizeof(len));
+ nbytes = BufFileReadMaybeEOF(fd, &len, sizeof(len), true);
/* have we reached end of the file? */
if (nbytes == 0)
break;
/* do we have a correct length? */
- if (nbytes != sizeof(len))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from streaming transaction's changes file \"%s\": read only %zu of %zu bytes",
- path, nbytes, sizeof(len))));
-
if (len <= 0)
elog(ERROR, "incorrect length %d in streaming transaction's changes file \"%s\"",
len, path);
@@ -1453,12 +1447,7 @@ apply_spooled_messages(TransactionId xid, XLogRecPtr lsn)
buffer = repalloc(buffer, len);
/* and finally read the data into the buffer */
- nbytes = BufFileRead(fd, buffer, len);
- if (nbytes != len)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from streaming transaction's changes file \"%s\": read only %zu of %zu bytes",
- path, nbytes, (size_t) len)));
+ BufFileReadExact(fd, buffer, len);
/* copy the buffer to the stringinfo and call apply_dispatch */
resetStringInfo(&s2);
@@ -3222,7 +3211,6 @@ static void
subxact_info_read(Oid subid, TransactionId xid)
{
char path[MAXPGPATH];
- size_t nread;
Size len;
BufFile *fd;
MemoryContext oldctx;
@@ -3242,12 +3230,7 @@ subxact_info_read(Oid subid, TransactionId xid)
return;
/* read number of subxact items */
- nread = BufFileRead(fd, &subxact_data.nsubxacts, sizeof(subxact_data.nsubxacts));
- if (nread != sizeof(subxact_data.nsubxacts))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from streaming transaction's subxact file \"%s\": read only %zu of %zu bytes",
- path, nread, sizeof(subxact_data.nsubxacts))));
+ BufFileReadExact(fd, &subxact_data.nsubxacts, sizeof(subxact_data.nsubxacts));
len = sizeof(SubXactInfo) * subxact_data.nsubxacts;
@@ -3266,14 +3249,7 @@ subxact_info_read(Oid subid, TransactionId xid)
MemoryContextSwitchTo(oldctx);
if (len > 0)
- {
- nread = BufFileRead(fd, subxact_data.subxacts, len);
- if (nread != len)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from streaming transaction's subxact file \"%s\": read only %zu of %zu bytes",
- path, nread, len)));
- }
+ BufFileReadExact(fd, subxact_data.subxacts, len);
BufFileClose(fd);
}
diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c
index 4a5bd0e224..c5464b6aa6 100644
--- a/src/backend/storage/file/buffile.c
+++ b/src/backend/storage/file/buffile.c
@@ -573,14 +573,19 @@ BufFileDumpBuffer(BufFile *file)
}
/*
- * BufFileRead
+ * BufFileRead variants
*
* Like fread() except we assume 1-byte element size and report I/O errors via
* ereport().
+ *
+ * If 'exact' is true, then an error is also raised if the number of bytes
+ * read is not exactly 'size' (no short reads). If 'exact' and 'eofOK' are
+ * true, then reading zero bytes is ok.
*/
-size_t
-BufFileRead(BufFile *file, void *ptr, size_t size)
+static size_t
+BufFileReadCommon(BufFile *file, void *ptr, size_t size, bool exact, bool eofOK)
{
+ size_t start_size = size;
size_t nread = 0;
size_t nthistime;
@@ -612,9 +617,48 @@ BufFileRead(BufFile *file, void *ptr, size_t size)
nread += nthistime;
}
+ if (exact &&
+ (nread != start_size && !(nread == 0 && eofOK)))
+ ereport(ERROR,
+ errcode_for_file_access(),
+ file->name ?
+ errmsg("could not read from file set \"%s\": read only %zu of %zu bytes",
+ file->name, nread, start_size) :
+ errmsg("could not read from temporary file: read only %zu of %zu bytes",
+ nread, start_size));
+
return nread;
}
+/*
+ * Legacy interface where the caller needs to check for end of file or short
+ * reads.
+ */
+size_t
+BufFileRead(BufFile *file, void *ptr, size_t size)
+{
+ return BufFileReadCommon(file, ptr, size, false, false);
+}
+
+/*
+ * Require read of exactly the specified size.
+ */
+void
+BufFileReadExact(BufFile *file, void *ptr, size_t size)
+{
+ BufFileReadCommon(file, ptr, size, true, false);
+}
+
+/*
+ * Require read of exactly the specified size, but optionally allow end of
+ * file (in which case 0 is returned).
+ */
+size_t
+BufFileReadMaybeEOF(BufFile *file, void *ptr, size_t size, bool eofOK)
+{
+ return BufFileReadCommon(file, ptr, size, true, eofOK);
+}
+
/*
* BufFileWrite
*
diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c
index 907dd8f0a7..56ac0298c5 100644
--- a/src/backend/utils/sort/logtape.c
+++ b/src/backend/utils/sort/logtape.c
@@ -281,19 +281,12 @@ ltsWriteBlock(LogicalTapeSet *lts, long blocknum, const void *buffer)
static void
ltsReadBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
{
- size_t nread;
-
if (BufFileSeekBlock(lts->pfile, blocknum) != 0)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not seek to block %ld of temporary file",
blocknum)));
- nread = BufFileRead(lts->pfile, buffer, BLCKSZ);
- if (nread != BLCKSZ)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read block %ld of temporary file: read only %zu of %zu bytes",
- blocknum, nread, (size_t) BLCKSZ)));
+ BufFileReadExact(lts->pfile, buffer, BLCKSZ);
}
/*
diff --git a/src/backend/utils/sort/sharedtuplestore.c b/src/backend/utils/sort/sharedtuplestore.c
index 65bbe0a52d..e3da83f10b 100644
--- a/src/backend/utils/sort/sharedtuplestore.c
+++ b/src/backend/utils/sort/sharedtuplestore.c
@@ -422,23 +422,10 @@ sts_read_tuple(SharedTuplestoreAccessor *accessor, void *meta_data)
*/
if (accessor->sts->meta_data_size > 0)
{
- if (BufFileRead(accessor->read_file,
- meta_data,
- accessor->sts->meta_data_size) !=
- accessor->sts->meta_data_size)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from shared tuplestore temporary file"),
- errdetail_internal("Short read while reading meta-data.")));
+ BufFileReadExact(accessor->read_file, meta_data, accessor->sts->meta_data_size);
accessor->read_bytes += accessor->sts->meta_data_size;
}
- if (BufFileRead(accessor->read_file,
- &size,
- sizeof(size)) != sizeof(size))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from shared tuplestore temporary file"),
- errdetail_internal("Short read while reading size.")));
+ BufFileReadExact(accessor->read_file, &size, sizeof(size));
accessor->read_bytes += sizeof(size);
if (size > accessor->read_buffer_size)
{
@@ -455,13 +442,7 @@ sts_read_tuple(SharedTuplestoreAccessor *accessor, void *meta_data)
this_chunk_size = Min(remaining_size,
BLCKSZ * STS_CHUNK_PAGES - accessor->read_bytes);
destination = accessor->read_buffer + sizeof(uint32);
- if (BufFileRead(accessor->read_file,
- destination,
- this_chunk_size) != this_chunk_size)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from shared tuplestore temporary file"),
- errdetail_internal("Short read while reading tuple.")));
+ BufFileReadExact(accessor->read_file, destination, this_chunk_size);
accessor->read_bytes += this_chunk_size;
remaining_size -= this_chunk_size;
destination += this_chunk_size;
@@ -473,12 +454,7 @@ sts_read_tuple(SharedTuplestoreAccessor *accessor, void *meta_data)
/* We are now positioned at the start of an overflow chunk. */
SharedTuplestoreChunk chunk_header;
- if (BufFileRead(accessor->read_file, &chunk_header, STS_CHUNK_HEADER_SIZE) !=
- STS_CHUNK_HEADER_SIZE)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from shared tuplestore temporary file"),
- errdetail_internal("Short read while reading overflow chunk header.")));
+ BufFileReadExact(accessor->read_file, &chunk_header, STS_CHUNK_HEADER_SIZE);
accessor->read_bytes = STS_CHUNK_HEADER_SIZE;
if (chunk_header.overflow == 0)
ereport(ERROR,
@@ -489,13 +465,7 @@ sts_read_tuple(SharedTuplestoreAccessor *accessor, void *meta_data)
this_chunk_size = Min(remaining_size,
BLCKSZ * STS_CHUNK_PAGES -
STS_CHUNK_HEADER_SIZE);
- if (BufFileRead(accessor->read_file,
- destination,
- this_chunk_size) != this_chunk_size)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from shared tuplestore temporary file"),
- errdetail_internal("Short read while reading tuple.")));
+ BufFileReadExact(accessor->read_file, destination, this_chunk_size);
accessor->read_bytes += this_chunk_size;
remaining_size -= this_chunk_size;
destination += this_chunk_size;
@@ -551,7 +521,6 @@ sts_parallel_scan_next(SharedTuplestoreAccessor *accessor, void *meta_data)
if (!eof)
{
SharedTuplestoreChunk chunk_header;
- size_t nread;
/* Make sure we have the file open. */
if (accessor->read_file == NULL)
@@ -570,13 +539,7 @@ sts_parallel_scan_next(SharedTuplestoreAccessor *accessor, void *meta_data)
(errcode_for_file_access(),
errmsg("could not seek to block %u in shared tuplestore temporary file",
read_page)));
- nread = BufFileRead(accessor->read_file, &chunk_header,
- STS_CHUNK_HEADER_SIZE);
- if (nread != STS_CHUNK_HEADER_SIZE)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from shared tuplestore temporary file: read only %zu of %zu bytes",
- nread, STS_CHUNK_HEADER_SIZE)));
+ BufFileReadExact(accessor->read_file, &chunk_header, STS_CHUNK_HEADER_SIZE);
/*
* If this is an overflow chunk, we skip it and any following
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index 44f1d9e855..bc36662198 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -1468,15 +1468,11 @@ getlen(Tuplestorestate *state, bool eofOK)
unsigned int len;
size_t nbytes;
- nbytes = BufFileRead(state->myfile, &len, sizeof(len));
- if (nbytes == sizeof(len))
+ nbytes = BufFileReadMaybeEOF(state->myfile, &len, sizeof(len), eofOK);
+ if (nbytes == 0)
+ return 0;
+ else
return len;
- if (nbytes != 0 || !eofOK)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from tuplestore temporary file: read only %zu of %zu bytes",
- nbytes, sizeof(len))));
- return 0;
}
@@ -1528,25 +1524,12 @@ readtup_heap(Tuplestorestate *state, unsigned int len)
unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
MinimalTuple tuple = (MinimalTuple) palloc(tuplen);
char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
- size_t nread;
USEMEM(state, GetMemoryChunkSpace(tuple));
/* read in the tuple proper */
tuple->t_len = tuplen;
- nread = BufFileRead(state->myfile, tupbody, tupbodylen);
- if (nread != (size_t) tupbodylen)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from tuplestore temporary file: read only %zu of %zu bytes",
- nread, (size_t) tupbodylen)));
+ BufFileReadExact(state->myfile, tupbody, tupbodylen);
if (state->backward) /* need trailing length word? */
- {
- nread = BufFileRead(state->myfile, &tuplen, sizeof(tuplen));
- if (nread != sizeof(tuplen))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from tuplestore temporary file: read only %zu of %zu bytes",
- nread, sizeof(tuplen))));
- }
+ BufFileReadExact(state->myfile, &tuplen, sizeof(tuplen));
return (void *) tuple;
}
diff --git a/src/include/storage/buffile.h b/src/include/storage/buffile.h
index 7ed5b03516..6583766719 100644
--- a/src/include/storage/buffile.h
+++ b/src/include/storage/buffile.h
@@ -38,7 +38,9 @@ typedef struct BufFile BufFile;
extern BufFile *BufFileCreateTemp(bool interXact);
extern void BufFileClose(BufFile *file);
-extern size_t BufFileRead(BufFile *file, void *ptr, size_t size);
+extern pg_nodiscard size_t BufFileRead(BufFile *file, void *ptr, size_t size);
+extern void BufFileReadExact(BufFile *file, void *ptr, size_t size);
+extern size_t BufFileReadMaybeEOF(BufFile *file, void *ptr, size_t size, bool eofOK);
extern void BufFileWrite(BufFile *file, const void *ptr, size_t size);
extern int BufFileSeek(BufFile *file, int fileno, off_t offset, int whence);
extern void BufFileTell(BufFile *file, int *fileno, off_t *offset);
--
2.39.0
On Fri, Jan 6, 2023 at 6:18 PM Peter Eisentraut
<peter.eisentraut@enterprisedb.com> wrote:
On 02.01.23 13:13, Amit Kapila wrote:
On Wed, Dec 28, 2022 at 4:17 PM Peter Eisentraut
<peter.eisentraut@enterprisedb.com> wrote:
Most callers of BufFileRead() want to check whether they read the full
specified length. Checking this at every call site is very tedious.
This patch provides additional variants BufFileReadExact() and
BufFileReadMaybeEOF() that include the length checks.
I considered changing BufFileRead() itself, but this function is also
used in extensions, and so changing the behavior like this would create
a lot of problems there. The new names are analogous to the existing
LogicalTapeReadExact().
+1 for the new APIs. I have noticed that some of the existing places
use %m and the file path in messages which are not used in the new
common function.
The existing uses of %m are wrong. This was already fixed once in
7897e3bb902c557412645b82120f4d95f7474906, but the affected areas of code
were apparently developed at around the same time and didn't get the
fix. So I have attached a separate patch to fix this first, which could
be backpatched.
+1. The patch is not getting applied due to a recent commit.
The original patch is then rebased on top of that. I have adjusted the
error message to include the file set name if available.
What this doesn't keep is the purpose of the temp file in some cases,
like "hash-join temporary file". We could maybe make this an additional
argument or an error context, but it seems cumbersome in any case.
Yeah, we can do that but not sure if it is worth doing any of those
because there are already other places that don't use the exact
context.
--
With Regards,
Amit Kapila.
On 10.01.23 07:20, Amit Kapila wrote:
The existing uses of %m are wrong. This was already fixed once in
7897e3bb902c557412645b82120f4d95f7474906, but the affected areas of code
were apparently developed at around the same time and didn't get the
fix. So I have attached a separate patch to fix this first, which could
be backpatched.
+1. The patch is not getting applied due to a recent commit.
The original patch is then rebased on top of that. I have adjusted the
error message to include the file set name if available.
What this doesn't keep is the purpose of the temp file in some cases,
like "hash-join temporary file". We could maybe make this an additional
argument or an error context, but it seems cumbersome in any case.
Yeah, we can do that but not sure if it is worth doing any of those
because there are already other places that don't use the exact
context.
Ok, updated patches attached.
Attachments:
v3-0001-Fix-some-BufFileRead-error-reporting.patch (text/plain)
From d1cd2f2748ecf34831cee565b29725df548da567 Mon Sep 17 00:00:00 2001
From: Peter Eisentraut <peter@eisentraut.org>
Date: Fri, 6 Jan 2023 11:57:35 +0100
Subject: [PATCH v3 1/2] Fix some BufFileRead() error reporting
Remove "%m" from error messages where errno would be bogus. Add short
read byte counts where appropriate.
This is equivalent to what was done in
7897e3bb902c557412645b82120f4d95f7474906, but some code was apparently
developed concurrently to that and not updated accordingly.
Discussion: https://www.postgresql.org/message-id/flat/f3501945-c591-8cc3-5ef0-b72a2e0eaa9c@enterprisedb.com
---
src/backend/backup/backup_manifest.c | 3 ++-
src/backend/replication/logical/worker.c | 33 ++++++++++++++----------
2 files changed, 21 insertions(+), 15 deletions(-)
diff --git a/src/backend/backup/backup_manifest.c b/src/backend/backup/backup_manifest.c
index 21ca2941dc..d325ef047a 100644
--- a/src/backend/backup/backup_manifest.c
+++ b/src/backend/backup/backup_manifest.c
@@ -371,7 +371,8 @@ SendBackupManifest(backup_manifest_info *manifest, bbsink *sink)
if (rc != bytes_to_read)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read from temporary file: %m")));
+ errmsg("could not read from temporary file: read only %zu of %zu bytes",
+ rc, bytes_to_read)));
bbsink_manifest_contents(sink, bytes_to_read);
manifest_bytes_done += bytes_to_read;
}
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index 79cda39445..f3856c9843 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -2063,7 +2063,7 @@ apply_spooled_messages(FileSet *stream_fileset, TransactionId xid,
nchanges = 0;
while (true)
{
- int nbytes;
+ size_t nbytes;
int len;
CHECK_FOR_INTERRUPTS();
@@ -2079,8 +2079,8 @@ apply_spooled_messages(FileSet *stream_fileset, TransactionId xid,
if (nbytes != sizeof(len))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read from streaming transaction's changes file \"%s\": %m",
- path)));
+ errmsg("could not read from streaming transaction's changes file \"%s\": read only %zu of %zu bytes",
+ path, nbytes, sizeof(len))));
if (len <= 0)
elog(ERROR, "incorrect length %d in streaming transaction's changes file \"%s\"",
@@ -2090,11 +2090,12 @@ apply_spooled_messages(FileSet *stream_fileset, TransactionId xid,
buffer = repalloc(buffer, len);
/* and finally read the data into the buffer */
- if (BufFileRead(stream_fd, buffer, len) != len)
+ nbytes = BufFileRead(stream_fd, buffer, len);
+ if (nbytes != len)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read from streaming transaction's changes file \"%s\": %m",
- path)));
+ errmsg("could not read from streaming transaction's changes file \"%s\": read only %zu of %zu bytes",
+ path, nbytes, (size_t) len)));
BufFileTell(stream_fd, &fileno, &offset);
@@ -3992,6 +3993,7 @@ static void
subxact_info_read(Oid subid, TransactionId xid)
{
char path[MAXPGPATH];
+ size_t nread;
Size len;
BufFile *fd;
MemoryContext oldctx;
@@ -4011,13 +4013,12 @@ subxact_info_read(Oid subid, TransactionId xid)
return;
/* read number of subxact items */
- if (BufFileRead(fd, &subxact_data.nsubxacts,
- sizeof(subxact_data.nsubxacts)) !=
- sizeof(subxact_data.nsubxacts))
+ nread = BufFileRead(fd, &subxact_data.nsubxacts, sizeof(subxact_data.nsubxacts));
+ if (nread != sizeof(subxact_data.nsubxacts))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read from streaming transaction's subxact file \"%s\": %m",
- path)));
+ errmsg("could not read from streaming transaction's subxact file \"%s\": read only %zu of %zu bytes",
+ path, nread, sizeof(subxact_data.nsubxacts))));
len = sizeof(SubXactInfo) * subxact_data.nsubxacts;
@@ -4035,11 +4036,15 @@ subxact_info_read(Oid subid, TransactionId xid)
sizeof(SubXactInfo));
MemoryContextSwitchTo(oldctx);
- if ((len > 0) && ((BufFileRead(fd, subxact_data.subxacts, len)) != len))
+ if (len > 0)
+ {
+ nread = BufFileRead(fd, subxact_data.subxacts, len);
+ if (nread != len)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read from streaming transaction's subxact file \"%s\": %m",
- path)));
+ errmsg("could not read from streaming transaction's subxact file \"%s\": read only %zu of %zu bytes",
+ path, nread, len)));
+ }
BufFileClose(fd);
}
base-commit: c8ad4d8166aabd6ed5124e7e432166637d0fe646
--
2.39.0
v3-0002-Add-BufFileRead-variants-with-short-read-and-EOF-.patch (text/plain)
From 03d64c2e7a4d8f869fadff52d184f8d1373a98e2 Mon Sep 17 00:00:00 2001
From: Peter Eisentraut <peter@eisentraut.org>
Date: Fri, 6 Jan 2023 12:13:49 +0100
Subject: [PATCH v3 2/2] Add BufFileRead variants with short read and EOF
detection
Most callers of BufFileRead() want to check whether they read the full
specified length. Checking this at every call site is very tedious.
This patch provides additional variants BufFileReadExact() and
BufFileReadMaybeEOF() that include the length checks.
I considered changing BufFileRead() itself, but this function is also
used in extensions, and so changing the behavior like this would
create a lot of problems there. The new names are analogous to the
existing LogicalTapeReadExact().
Discussion: https://www.postgresql.org/message-id/flat/f3501945-c591-8cc3-5ef0-b72a2e0eaa9c@enterprisedb.com
---
src/backend/access/gist/gistbuildbuffers.c | 7 +--
src/backend/backup/backup_manifest.c | 9 +---
src/backend/executor/nodeHashjoin.c | 18 ++------
src/backend/replication/logical/worker.c | 32 ++------------
src/backend/storage/file/buffile.c | 50 ++++++++++++++++++++--
src/backend/utils/sort/logtape.c | 9 +---
src/backend/utils/sort/sharedtuplestore.c | 49 +++------------------
src/backend/utils/sort/tuplestore.c | 29 +++----------
src/include/storage/buffile.h | 4 +-
9 files changed, 73 insertions(+), 134 deletions(-)
diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c
index 1a732dd5cf..3399a6ae68 100644
--- a/src/backend/access/gist/gistbuildbuffers.c
+++ b/src/backend/access/gist/gistbuildbuffers.c
@@ -753,14 +753,9 @@ gistRelocateBuildBuffersOnSplit(GISTBuildBuffers *gfbb, GISTSTATE *giststate,
static void
ReadTempFileBlock(BufFile *file, long blknum, void *ptr)
{
- size_t nread;
-
if (BufFileSeekBlock(file, blknum) != 0)
elog(ERROR, "could not seek to block %ld in temporary file", blknum);
- nread = BufFileRead(file, ptr, BLCKSZ);
- if (nread != BLCKSZ)
- elog(ERROR, "could not read temporary file: read only %zu of %zu bytes",
- nread, (size_t) BLCKSZ);
+ BufFileReadExact(file, ptr, BLCKSZ);
}
static void
diff --git a/src/backend/backup/backup_manifest.c b/src/backend/backup/backup_manifest.c
index d325ef047a..fabd2ca299 100644
--- a/src/backend/backup/backup_manifest.c
+++ b/src/backend/backup/backup_manifest.c
@@ -362,17 +362,10 @@ SendBackupManifest(backup_manifest_info *manifest, bbsink *sink)
while (manifest_bytes_done < manifest->manifest_size)
{
size_t bytes_to_read;
- size_t rc;
bytes_to_read = Min(sink->bbs_buffer_length,
manifest->manifest_size - manifest_bytes_done);
- rc = BufFileRead(manifest->buffile, sink->bbs_buffer,
- bytes_to_read);
- if (rc != bytes_to_read)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from temporary file: read only %zu of %zu bytes",
- rc, bytes_to_read)));
+ BufFileReadExact(manifest->buffile, sink->bbs_buffer, bytes_to_read);
bbsink_manifest_contents(sink, bytes_to_read);
manifest_bytes_done += bytes_to_read;
}
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index 9dbbd7f8c3..b215e3f59a 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -1260,28 +1260,18 @@ ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
* we can read them both in one BufFileRead() call without any type
* cheating.
*/
- nread = BufFileRead(file, header, sizeof(header));
+ nread = BufFileReadMaybeEOF(file, header, sizeof(header), true);
if (nread == 0) /* end of file */
{
ExecClearTuple(tupleSlot);
return NULL;
}
- if (nread != sizeof(header))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from hash-join temporary file: read only %zu of %zu bytes",
- nread, sizeof(header))));
*hashvalue = header[0];
tuple = (MinimalTuple) palloc(header[1]);
tuple->t_len = header[1];
- nread = BufFileRead(file,
- ((char *) tuple + sizeof(uint32)),
- header[1] - sizeof(uint32));
- if (nread != header[1] - sizeof(uint32))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from hash-join temporary file: read only %zu of %zu bytes",
- nread, header[1] - sizeof(uint32))));
+ BufFileReadExact(file,
+ (char *) tuple + sizeof(uint32),
+ header[1] - sizeof(uint32));
ExecForceStoreMinimalTuple(tuple, tupleSlot, true);
return tupleSlot;
}
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index f3856c9843..d8b8a374c6 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -2069,19 +2069,13 @@ apply_spooled_messages(FileSet *stream_fileset, TransactionId xid,
CHECK_FOR_INTERRUPTS();
/* read length of the on-disk record */
- nbytes = BufFileRead(stream_fd, &len, sizeof(len));
+ nbytes = BufFileReadMaybeEOF(stream_fd, &len, sizeof(len), true);
/* have we reached end of the file? */
if (nbytes == 0)
break;
/* do we have a correct length? */
- if (nbytes != sizeof(len))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from streaming transaction's changes file \"%s\": read only %zu of %zu bytes",
- path, nbytes, sizeof(len))));
-
if (len <= 0)
elog(ERROR, "incorrect length %d in streaming transaction's changes file \"%s\"",
len, path);
@@ -2090,12 +2084,7 @@ apply_spooled_messages(FileSet *stream_fileset, TransactionId xid,
buffer = repalloc(buffer, len);
/* and finally read the data into the buffer */
- nbytes = BufFileRead(stream_fd, buffer, len);
- if (nbytes != len)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from streaming transaction's changes file \"%s\": read only %zu of %zu bytes",
- path, nbytes, (size_t) len)));
+ BufFileReadExact(stream_fd, buffer, len);
BufFileTell(stream_fd, &fileno, &offset);
@@ -3993,7 +3982,6 @@ static void
subxact_info_read(Oid subid, TransactionId xid)
{
char path[MAXPGPATH];
- size_t nread;
Size len;
BufFile *fd;
MemoryContext oldctx;
@@ -4013,12 +4001,7 @@ subxact_info_read(Oid subid, TransactionId xid)
return;
/* read number of subxact items */
- nread = BufFileRead(fd, &subxact_data.nsubxacts, sizeof(subxact_data.nsubxacts));
- if (nread != sizeof(subxact_data.nsubxacts))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from streaming transaction's subxact file \"%s\": read only %zu of %zu bytes",
- path, nread, sizeof(subxact_data.nsubxacts))));
+ BufFileReadExact(fd, &subxact_data.nsubxacts, sizeof(subxact_data.nsubxacts));
len = sizeof(SubXactInfo) * subxact_data.nsubxacts;
@@ -4037,14 +4020,7 @@ subxact_info_read(Oid subid, TransactionId xid)
MemoryContextSwitchTo(oldctx);
if (len > 0)
- {
- nread = BufFileRead(fd, subxact_data.subxacts, len);
- if (nread != len)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from streaming transaction's subxact file \"%s\": read only %zu of %zu bytes",
- path, nread, len)));
- }
+ BufFileReadExact(fd, subxact_data.subxacts, len);
BufFileClose(fd);
}
diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c
index 4a5bd0e224..c5464b6aa6 100644
--- a/src/backend/storage/file/buffile.c
+++ b/src/backend/storage/file/buffile.c
@@ -573,14 +573,19 @@ BufFileDumpBuffer(BufFile *file)
}
/*
- * BufFileRead
+ * BufFileRead variants
*
* Like fread() except we assume 1-byte element size and report I/O errors via
* ereport().
+ *
+ * If 'exact' is true, then an error is also raised if the number of bytes
+ * read is not exactly 'size' (no short reads). If 'exact' and 'eofOK' are
+ * true, then reading zero bytes is ok.
*/
-size_t
-BufFileRead(BufFile *file, void *ptr, size_t size)
+static size_t
+BufFileReadCommon(BufFile *file, void *ptr, size_t size, bool exact, bool eofOK)
{
+ size_t start_size = size;
size_t nread = 0;
size_t nthistime;
@@ -612,9 +617,48 @@ BufFileRead(BufFile *file, void *ptr, size_t size)
nread += nthistime;
}
+ if (exact &&
+ (nread != start_size && !(nread == 0 && eofOK)))
+ ereport(ERROR,
+ errcode_for_file_access(),
+ file->name ?
+ errmsg("could not read from file set \"%s\": read only %zu of %zu bytes",
+ file->name, nread, start_size) :
+ errmsg("could not read from temporary file: read only %zu of %zu bytes",
+ nread, start_size));
+
return nread;
}
+/*
+ * Legacy interface where the caller needs to check for end of file or short
+ * reads.
+ */
+size_t
+BufFileRead(BufFile *file, void *ptr, size_t size)
+{
+ return BufFileReadCommon(file, ptr, size, false, false);
+}
+
+/*
+ * Require read of exactly the specified size.
+ */
+void
+BufFileReadExact(BufFile *file, void *ptr, size_t size)
+{
+ BufFileReadCommon(file, ptr, size, true, false);
+}
+
+/*
+ * Require read of exactly the specified size, but optionally allow end of
+ * file (in which case 0 is returned).
+ */
+size_t
+BufFileReadMaybeEOF(BufFile *file, void *ptr, size_t size, bool eofOK)
+{
+ return BufFileReadCommon(file, ptr, size, true, eofOK);
+}
+
/*
* BufFileWrite
*
diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c
index 907dd8f0a7..56ac0298c5 100644
--- a/src/backend/utils/sort/logtape.c
+++ b/src/backend/utils/sort/logtape.c
@@ -281,19 +281,12 @@ ltsWriteBlock(LogicalTapeSet *lts, long blocknum, const void *buffer)
static void
ltsReadBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
{
- size_t nread;
-
if (BufFileSeekBlock(lts->pfile, blocknum) != 0)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not seek to block %ld of temporary file",
blocknum)));
- nread = BufFileRead(lts->pfile, buffer, BLCKSZ);
- if (nread != BLCKSZ)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read block %ld of temporary file: read only %zu of %zu bytes",
- blocknum, nread, (size_t) BLCKSZ)));
+ BufFileReadExact(lts->pfile, buffer, BLCKSZ);
}
/*
diff --git a/src/backend/utils/sort/sharedtuplestore.c b/src/backend/utils/sort/sharedtuplestore.c
index 65bbe0a52d..e3da83f10b 100644
--- a/src/backend/utils/sort/sharedtuplestore.c
+++ b/src/backend/utils/sort/sharedtuplestore.c
@@ -422,23 +422,10 @@ sts_read_tuple(SharedTuplestoreAccessor *accessor, void *meta_data)
*/
if (accessor->sts->meta_data_size > 0)
{
- if (BufFileRead(accessor->read_file,
- meta_data,
- accessor->sts->meta_data_size) !=
- accessor->sts->meta_data_size)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from shared tuplestore temporary file"),
- errdetail_internal("Short read while reading meta-data.")));
+ BufFileReadExact(accessor->read_file, meta_data, accessor->sts->meta_data_size);
accessor->read_bytes += accessor->sts->meta_data_size;
}
- if (BufFileRead(accessor->read_file,
- &size,
- sizeof(size)) != sizeof(size))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from shared tuplestore temporary file"),
- errdetail_internal("Short read while reading size.")));
+ BufFileReadExact(accessor->read_file, &size, sizeof(size));
accessor->read_bytes += sizeof(size);
if (size > accessor->read_buffer_size)
{
@@ -455,13 +442,7 @@ sts_read_tuple(SharedTuplestoreAccessor *accessor, void *meta_data)
this_chunk_size = Min(remaining_size,
BLCKSZ * STS_CHUNK_PAGES - accessor->read_bytes);
destination = accessor->read_buffer + sizeof(uint32);
- if (BufFileRead(accessor->read_file,
- destination,
- this_chunk_size) != this_chunk_size)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from shared tuplestore temporary file"),
- errdetail_internal("Short read while reading tuple.")));
+ BufFileReadExact(accessor->read_file, destination, this_chunk_size);
accessor->read_bytes += this_chunk_size;
remaining_size -= this_chunk_size;
destination += this_chunk_size;
@@ -473,12 +454,7 @@ sts_read_tuple(SharedTuplestoreAccessor *accessor, void *meta_data)
/* We are now positioned at the start of an overflow chunk. */
SharedTuplestoreChunk chunk_header;
- if (BufFileRead(accessor->read_file, &chunk_header, STS_CHUNK_HEADER_SIZE) !=
- STS_CHUNK_HEADER_SIZE)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from shared tuplestore temporary file"),
- errdetail_internal("Short read while reading overflow chunk header.")));
+ BufFileReadExact(accessor->read_file, &chunk_header, STS_CHUNK_HEADER_SIZE);
accessor->read_bytes = STS_CHUNK_HEADER_SIZE;
if (chunk_header.overflow == 0)
ereport(ERROR,
@@ -489,13 +465,7 @@ sts_read_tuple(SharedTuplestoreAccessor *accessor, void *meta_data)
this_chunk_size = Min(remaining_size,
BLCKSZ * STS_CHUNK_PAGES -
STS_CHUNK_HEADER_SIZE);
- if (BufFileRead(accessor->read_file,
- destination,
- this_chunk_size) != this_chunk_size)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from shared tuplestore temporary file"),
- errdetail_internal("Short read while reading tuple.")));
+ BufFileReadExact(accessor->read_file, destination, this_chunk_size);
accessor->read_bytes += this_chunk_size;
remaining_size -= this_chunk_size;
destination += this_chunk_size;
@@ -551,7 +521,6 @@ sts_parallel_scan_next(SharedTuplestoreAccessor *accessor, void *meta_data)
if (!eof)
{
SharedTuplestoreChunk chunk_header;
- size_t nread;
/* Make sure we have the file open. */
if (accessor->read_file == NULL)
@@ -570,13 +539,7 @@ sts_parallel_scan_next(SharedTuplestoreAccessor *accessor, void *meta_data)
(errcode_for_file_access(),
errmsg("could not seek to block %u in shared tuplestore temporary file",
read_page)));
- nread = BufFileRead(accessor->read_file, &chunk_header,
- STS_CHUNK_HEADER_SIZE);
- if (nread != STS_CHUNK_HEADER_SIZE)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from shared tuplestore temporary file: read only %zu of %zu bytes",
- nread, STS_CHUNK_HEADER_SIZE)));
+ BufFileReadExact(accessor->read_file, &chunk_header, STS_CHUNK_HEADER_SIZE);
/*
* If this is an overflow chunk, we skip it and any following
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index 44f1d9e855..bc36662198 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -1468,15 +1468,11 @@ getlen(Tuplestorestate *state, bool eofOK)
unsigned int len;
size_t nbytes;
- nbytes = BufFileRead(state->myfile, &len, sizeof(len));
- if (nbytes == sizeof(len))
+ nbytes = BufFileReadMaybeEOF(state->myfile, &len, sizeof(len), eofOK);
+ if (nbytes == 0)
+ return 0;
+ else
return len;
- if (nbytes != 0 || !eofOK)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from tuplestore temporary file: read only %zu of %zu bytes",
- nbytes, sizeof(len))));
- return 0;
}
@@ -1528,25 +1524,12 @@ readtup_heap(Tuplestorestate *state, unsigned int len)
unsigned int tuplen = tupbodylen + MINIMAL_TUPLE_DATA_OFFSET;
MinimalTuple tuple = (MinimalTuple) palloc(tuplen);
char *tupbody = (char *) tuple + MINIMAL_TUPLE_DATA_OFFSET;
- size_t nread;
USEMEM(state, GetMemoryChunkSpace(tuple));
/* read in the tuple proper */
tuple->t_len = tuplen;
- nread = BufFileRead(state->myfile, tupbody, tupbodylen);
- if (nread != (size_t) tupbodylen)
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from tuplestore temporary file: read only %zu of %zu bytes",
- nread, (size_t) tupbodylen)));
+ BufFileReadExact(state->myfile, tupbody, tupbodylen);
if (state->backward) /* need trailing length word? */
- {
- nread = BufFileRead(state->myfile, &tuplen, sizeof(tuplen));
- if (nread != sizeof(tuplen))
- ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not read from tuplestore temporary file: read only %zu of %zu bytes",
- nread, sizeof(tuplen))));
- }
+ BufFileReadExact(state->myfile, &tuplen, sizeof(tuplen));
return (void *) tuple;
}
diff --git a/src/include/storage/buffile.h b/src/include/storage/buffile.h
index 7ed5b03516..6583766719 100644
--- a/src/include/storage/buffile.h
+++ b/src/include/storage/buffile.h
@@ -38,7 +38,9 @@ typedef struct BufFile BufFile;
extern BufFile *BufFileCreateTemp(bool interXact);
extern void BufFileClose(BufFile *file);
-extern size_t BufFileRead(BufFile *file, void *ptr, size_t size);
+extern pg_nodiscard size_t BufFileRead(BufFile *file, void *ptr, size_t size);
+extern void BufFileReadExact(BufFile *file, void *ptr, size_t size);
+extern size_t BufFileReadMaybeEOF(BufFile *file, void *ptr, size_t size, bool eofOK);
extern void BufFileWrite(BufFile *file, const void *ptr, size_t size);
extern int BufFileSeek(BufFile *file, int fileno, off_t offset, int whence);
extern void BufFileTell(BufFile *file, int *fileno, off_t *offset);
--
2.39.0
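
For anyone skimming the thread, here is a minimal caller-side sketch (not part of the patch; the function and variable names are made up for illustration) of how the two new variants are meant to be combined: BufFileReadMaybeEOF() for a length word where a clean end of file is acceptable, and BufFileReadExact() for the payload, where any short read should be raised as an error by buffile.c itself.

#include "postgres.h"

#include "storage/buffile.h"

/*
 * Hypothetical helper, for illustration only: read one length-prefixed
 * record from a BufFile.  Returns false on a clean end of file; any short
 * read is reported via ereport() by the BufFileRead*() variants themselves,
 * so the caller no longer needs its own length checks.
 */
static bool
read_one_record(BufFile *file, void *dest, Size maxlen)
{
    uint32      len;

    /* Reading zero bytes here is a normal end of file, not an error. */
    if (BufFileReadMaybeEOF(file, &len, sizeof(len), true) == 0)
        return false;

    if (len > maxlen)
        elog(ERROR, "record length %u exceeds buffer size %zu", len, maxlen);

    /* The length check (and the error message) now live in buffile.c. */
    BufFileReadExact(file, dest, len);

    return true;
}

This mirrors the getlen()/readtup_heap() conversion in tuplestore.c shown in the patch above.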
On Thu, Jan 12, 2023 at 2:44 PM Peter Eisentraut
<peter.eisentraut@enterprisedb.com> wrote:
> On 10.01.23 07:20, Amit Kapila wrote:
>> Yeah, we can do that but not sure if it is worth doing any of those
>> because there are already other places that don't use the exact
>> context.
>
> Ok, updated patches attached.

Both the patches look good to me.

--
With Regards,
Amit Kapila.
On 14.01.23 07:01, Amit Kapila wrote:
> On Thu, Jan 12, 2023 at 2:44 PM Peter Eisentraut
> <peter.eisentraut@enterprisedb.com> wrote:
>> On 10.01.23 07:20, Amit Kapila wrote:
>>> Yeah, we can do that but not sure if it is worth doing any of those
>>> because there are already other places that don't use the exact
>>> context.
>>
>> Ok, updated patches attached.
>
> Both the patches look good to me.

Committed, and the first part backpatched.