From 904a31bb8f519f5a9e4b30d9010edf506cddad1f Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplageman@gmail.com>
Date: Tue, 17 Jun 2025 17:06:45 -0400
Subject: [PATCH v1 03/14] Introduce heap-specific wrapper for
 visibilitymap_set()

visibilitymap_set(), which sets bits in the visibility map corresponding
to the heap block of the table passed in, arguably contains some
layering violations.

For example, it sets the heap page's LSN when checksums/wal_log_hints
are enabled. However, the caller may not need to set PD_ALL_VISIBLE
(when it is already set) and thus may not have marked the buffer dirty.
visibilitymap_set() will still set the page LSN in this case, even when
it would have been correct for the caller to *not* mark the buffer
dirty.

Also, every caller that needs PD_ALL_VISIBLE set has to remember both to
set it and to mark the buffer dirty. This commit introduces a wrapper that
does both, and adds a flag to visibilitymap_set() indicating whether or
not the heap page LSN should be set.
---
 src/backend/access/heap/heapam.c        | 62 ++++++++++++++++++-------
 src/backend/access/heap/heapam_xlog.c   |  2 +-
 src/backend/access/heap/vacuumlazy.c    | 60 ++++++------------------
 src/backend/access/heap/visibilitymap.c | 19 ++++----
 src/include/access/heapam.h             |  3 ++
 src/include/access/visibilitymap.h      |  2 +-
 6 files changed, 73 insertions(+), 75 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 0dcd6ee817e..dc409fd3a60 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -2505,8 +2505,6 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 								BufferGetBlockNumber(buffer),
 								vmbuffer, VISIBILITYMAP_VALID_BITS);
 		}
-		else if (all_frozen_set)
-			PageSetAllVisible(page);
 
 		/*
 		 * XXX Should we set PageSetPrunable on this page ? See heap_insert()
@@ -2632,23 +2630,16 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
 
 		/*
 		 * If we've frozen everything on the page, update the visibilitymap.
-		 * We're already holding pin on the vmbuffer.
+		 * We're already holding pin on the vmbuffer. It's fine to use
+		 * InvalidTransactionId here - this is only used when
+		 * HEAP_INSERT_FROZEN is specified, which intentionally violates
+		 * visibility rules.
 		 */
 		if (all_frozen_set)
-		{
-			Assert(PageIsAllVisible(page));
-			Assert(visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer));
-
-			/*
-			 * It's fine to use InvalidTransactionId here - this is only used
-			 * when HEAP_INSERT_FROZEN is specified, which intentionally
-			 * violates visibility rules.
-			 */
-			visibilitymap_set(relation, BufferGetBlockNumber(buffer), buffer,
-							  InvalidXLogRecPtr, vmbuffer,
-							  InvalidTransactionId,
-							  VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
-		}
+			heap_page_set_vm_and_log(relation, BufferGetBlockNumber(buffer), buffer,
+									 vmbuffer,
+									 InvalidTransactionId,
+									 VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
 
 		UnlockReleaseBuffer(buffer);
 		ndone += nthispage;
@@ -7840,6 +7831,43 @@ heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
 	return false;
 }
 
+/*
+ * Make the heap and VM page changes needed to set a page all-visible.
+ * Do not call in recovery.
+ */
+uint8
+heap_page_set_vm_and_log(Relation rel, BlockNumber heap_blk, Buffer heap_buf,
+						 Buffer vmbuf, TransactionId cutoff_xid,
+						 uint8 vmflags)
+{
+	Page		heap_page = BufferGetPage(heap_buf);
+	bool		set_heap_lsn = false;
+
+	Assert(BufferIsValid(heap_buf));
+
+	/* Check that the caller passed the right heap page */
+	if (BufferGetBlockNumber(heap_buf) != heap_blk)
+		elog(ERROR, "wrong heap buffer passed to heap_page_set_vm_and_log");
+
+	/*
+	 * We must never end up with the VM bit set and the page-level
+	 * PD_ALL_VISIBLE bit clear. If that were to occur, a subsequent page
+	 * modification would fail to clear the VM bit. Though it is possible for
+	 * the page-level bit to be set and the VM bit to be clear if checksums
+	 * and wal_log_hints are not enabled.
+	 */
+	if (!PageIsAllVisible(heap_page))
+	{
+		PageSetAllVisible(heap_page);
+		MarkBufferDirty(heap_buf);
+		if (XLogHintBitIsNeeded())
+			set_heap_lsn = true;
+	}
+
+	return visibilitymap_set(rel, heap_blk, heap_buf,
+							 InvalidXLogRecPtr, vmbuf, cutoff_xid, vmflags, set_heap_lsn);
+}
+
 /*
  * heap_tuple_should_freeze
  *
diff --git a/src/backend/access/heap/heapam_xlog.c b/src/backend/access/heap/heapam_xlog.c
index fa94e104f1c..cfd4fc3327d 100644
--- a/src/backend/access/heap/heapam_xlog.c
+++ b/src/backend/access/heap/heapam_xlog.c
@@ -298,7 +298,7 @@ heap_xlog_visible(XLogReaderState *record)
 		visibilitymap_pin(reln, blkno, &vmbuffer);
 
 		visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
-						  xlrec->snapshotConflictHorizon, vmbits);
+						  xlrec->snapshotConflictHorizon, vmbits, false);
 
 		ReleaseBuffer(vmbuffer);
 		FreeFakeRelcacheEntry(reln);
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index c8da2f835c4..5e662936dd7 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -1892,12 +1892,10 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
 				PageGetLSN(page) == InvalidXLogRecPtr)
 				log_newpage_buffer(buf, true);
 
-			PageSetAllVisible(page);
-			old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
-										   InvalidXLogRecPtr,
-										   vmbuffer, InvalidTransactionId,
-										   VISIBILITYMAP_ALL_VISIBLE |
-										   VISIBILITYMAP_ALL_FROZEN);
+			old_vmbits = heap_page_set_vm_and_log(vacrel->rel, blkno, buf,
+												  vmbuffer, InvalidTransactionId,
+												  VISIBILITYMAP_ALL_VISIBLE |
+												  VISIBILITYMAP_ALL_FROZEN);
 			END_CRIT_SECTION();
 
 			/* VM bits cannot have been set if PD_ALL_VISIBLE was clear */
@@ -2074,25 +2072,9 @@ lazy_scan_prune(LVRelState *vacrel,
 			flags |= VISIBILITYMAP_ALL_FROZEN;
 		}
 
-		/*
-		 * It should never be the case that the visibility map page is set
-		 * while the page-level bit is clear, but the reverse is allowed (if
-		 * checksums are not enabled).  Regardless, set both bits so that we
-		 * get back in sync.
-		 *
-		 * NB: If the heap page is all-visible but the VM bit is not set, we
-		 * don't need to dirty the heap page.  However, if checksums are
-		 * enabled, we do need to make sure that the heap page is dirtied
-		 * before passing it to visibilitymap_set(), because it may be logged.
-		 * Given that this situation should only happen in rare cases after a
-		 * crash, it is not worth optimizing.
-		 */
-		PageSetAllVisible(page);
-		MarkBufferDirty(buf);
-		old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
-									   InvalidXLogRecPtr,
-									   vmbuffer, presult.vm_conflict_horizon,
-									   flags);
+		old_vmbits = heap_page_set_vm_and_log(vacrel->rel, blkno, buf,
+											  vmbuffer, presult.vm_conflict_horizon,
+											  flags);
 
 		/*
 		 * If the page wasn't already set all-visible and/or all-frozen in the
@@ -2164,17 +2146,6 @@ lazy_scan_prune(LVRelState *vacrel,
 	{
 		uint8		old_vmbits;
 
-		/*
-		 * Avoid relying on all_visible_according_to_vm as a proxy for the
-		 * page-level PD_ALL_VISIBLE bit being set, since it might have become
-		 * stale -- even when all_visible is set
-		 */
-		if (!PageIsAllVisible(page))
-		{
-			PageSetAllVisible(page);
-			MarkBufferDirty(buf);
-		}
-
 		/*
 		 * Set the page all-frozen (and all-visible) in the VM.
 		 *
@@ -2183,11 +2154,10 @@ lazy_scan_prune(LVRelState *vacrel,
 		 * was logged when the page's tuples were frozen.
 		 */
 		Assert(!TransactionIdIsValid(presult.vm_conflict_horizon));
-		old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
-									   InvalidXLogRecPtr,
-									   vmbuffer, InvalidTransactionId,
-									   VISIBILITYMAP_ALL_VISIBLE |
-									   VISIBILITYMAP_ALL_FROZEN);
+		old_vmbits = heap_page_set_vm_and_log(vacrel->rel, blkno, buf,
+											  vmbuffer, InvalidTransactionId,
+											  VISIBILITYMAP_ALL_VISIBLE |
+											  VISIBILITYMAP_ALL_FROZEN);
 
 		/*
 		 * The page was likely already set all-visible in the VM. However,
@@ -2919,11 +2889,9 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
 			flags |= VISIBILITYMAP_ALL_FROZEN;
 		}
 
-		PageSetAllVisible(page);
-		old_vmbits = visibilitymap_set(vacrel->rel, blkno, buffer,
-									   InvalidXLogRecPtr,
-									   vmbuffer, visibility_cutoff_xid,
-									   flags);
+		old_vmbits = heap_page_set_vm_and_log(vacrel->rel, blkno, buffer,
+											  vmbuffer, visibility_cutoff_xid,
+											  flags);
 
 		/* We know the page should not have been all-visible */
 		Assert((old_vmbits & VISIBILITYMAP_VALID_BITS) == 0);
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index 745a04ef26e..45721399122 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -232,9 +232,10 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf)
  * when a page that is already all-visible is being marked all-frozen.
  *
  * Caller is expected to set the heap page's PD_ALL_VISIBLE bit before calling
- * this function. Except in recovery, caller should also pass the heap
- * buffer. When checksums are enabled and we're not in recovery, we must add
- * the heap buffer to the WAL chain to protect it from being torn.
+ * this function. Except in recovery, caller should also pass the heap buffer.
+ * When checksums are enabled and we're not in recovery, if the heap page was
+ * modified, we must add the heap buffer to the WAL chain to protect it from
+ * being torn.
  *
  * You must pass a buffer containing the correct map page to this function.
  * Call visibilitymap_pin first to pin the right one. This function doesn't do
@@ -245,7 +246,7 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf)
 uint8
 visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
 				  XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid,
-				  uint8 flags)
+				  uint8 flags, bool set_heap_lsn)
 {
 	BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
 	uint32		mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
@@ -259,16 +260,12 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
 #endif
 
 	Assert(InRecovery || XLogRecPtrIsInvalid(recptr));
-	Assert(InRecovery || PageIsAllVisible((Page) BufferGetPage(heapBuf)));
+	Assert(!(InRecovery && set_heap_lsn));
 	Assert((flags & VISIBILITYMAP_VALID_BITS) == flags);
 
 	/* Must never set all_frozen bit without also setting all_visible bit */
 	Assert(flags != VISIBILITYMAP_ALL_FROZEN);
 
-	/* Check that we have the right heap page pinned, if present */
-	if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk)
-		elog(ERROR, "wrong heap buffer passed to visibilitymap_set");
-
 	/* Check that we have the right VM page pinned */
 	if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
 		elog(ERROR, "wrong VM buffer passed to visibilitymap_set");
@@ -301,10 +298,12 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
 				 * WAL record inserted above, so it would be incorrect to
 				 * update the heap page's LSN.
 				 */
-				if (XLogHintBitIsNeeded())
+				if (set_heap_lsn)
 				{
 					Page		heapPage = BufferGetPage(heapBuf);
 
+					Assert(XLogHintBitIsNeeded());
+
 					PageSetLSN(heapPage, recptr);
 				}
 			}
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 3a9424c19c9..9375296062f 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -360,6 +360,9 @@ extern bool heap_tuple_should_freeze(HeapTupleHeader tuple,
 									 TransactionId *NoFreezePageRelfrozenXid,
 									 MultiXactId *NoFreezePageRelminMxid);
 extern bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple);
+extern uint8 heap_page_set_vm_and_log(Relation rel, BlockNumber heap_blk, Buffer heap_buf,
+									  Buffer vmbuf, TransactionId cutoff_xid,
+									  uint8 vmflags);
 
 extern void simple_heap_insert(Relation relation, HeapTuple tup);
 extern void simple_heap_delete(Relation relation, ItemPointer tid);
diff --git a/src/include/access/visibilitymap.h b/src/include/access/visibilitymap.h
index be21c6dd1a3..4fa4f837535 100644
--- a/src/include/access/visibilitymap.h
+++ b/src/include/access/visibilitymap.h
@@ -36,7 +36,7 @@ extern uint8 visibilitymap_set(Relation rel,
 							   XLogRecPtr recptr,
 							   Buffer vmBuf,
 							   TransactionId cutoff_xid,
-							   uint8 flags);
+							   uint8 flags, bool set_heap_lsn);
 extern uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf);
 extern void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen);
 extern BlockNumber visibilitymap_prepare_truncate(Relation rel,
-- 
2.34.1

