From 27e431e8dc69bbf09d831cb1cf2903d16f177d74 Mon Sep 17 00:00:00 2001
From: Heikki Linnakangas <heikki.linnakangas@iki.fi>
Date: Wed, 6 Mar 2024 20:58:57 +0200
Subject: [PATCH v6 6/9] Move vmbuffer back to a local variable in
 lazy_scan_heap()

It felt confusing that we passed around the current block, 'blkno', as
an argument to lazy_scan_new_or_empty() and lazy_scan_prune(), but
'vmbuffer' was accessed directly in the 'scan_state'.

It was also a bit vague, when exactly 'vmbuffer' was valid. Calling
heap_vac_scan_get_next_block() set it, sometimes, to a buffer that
might or might not contain the VM bit for 'blkno'. But other
functions, like lazy_scan_prune(), assumed it to contain the correct
buffer. That was fixed up visibilitymap_pin(). But clearly it was not
"owned" by heap_vac_scan_get_next_block(), like the other 'scan_state'
fields.

I moved it back to a local variable, like it was. Maybe there would be
even better ways to handle it, but at least this is not worse than
what we have in master currently.
---
 src/backend/access/heap/vacuumlazy.c | 72 +++++++++++++---------------
 1 file changed, 33 insertions(+), 39 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 51391870bf3..3f1661cea61 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -213,8 +213,6 @@ typedef struct LVRelState
 	{
 		/* Next unskippable block */
 		BlockNumber next_unskippable_block;
-		/* Buffer containing next unskippable block's visibility info */
-		Buffer		vmbuffer;
 		/* Next unskippable block's visibility status */
 		bool		next_unskippable_allvis;
 	}			skip;
@@ -232,13 +230,14 @@ typedef struct LVSavedErrInfo
 static void lazy_scan_heap(LVRelState *vacrel);
 static bool heap_vac_scan_get_next_block(LVRelState *vacrel, BlockNumber next_block,
 										 BlockNumber *blkno,
-										 bool *all_visible_according_to_vm);
+										 bool *all_visible_according_to_vm,
+										 Buffer *vmbuffer);
 static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf,
 								   BlockNumber blkno, Page page,
-								   bool sharelock);
+								   bool sharelock, Buffer vmbuffer);
 static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
 							BlockNumber blkno, Page page,
-							bool all_visible_according_to_vm,
+							Buffer vmbuffer, bool all_visible_according_to_vm,
 							bool *has_lpdead_items);
 static bool lazy_scan_noprune(LVRelState *vacrel, Buffer buf,
 							  BlockNumber blkno, Page page,
@@ -815,11 +814,10 @@ lazy_scan_heap(LVRelState *vacrel)
 {
 	BlockNumber rel_pages = vacrel->rel_pages,
 				next_fsm_block_to_vacuum = 0;
-	bool		all_visible_according_to_vm;
-
-	/* relies on InvalidBlockNumber overflowing to 0 */
-	BlockNumber blkno = InvalidBlockNumber;
 	VacDeadItems *dead_items = vacrel->dead_items;
+	BlockNumber blkno;
+	bool		all_visible_according_to_vm;
+	Buffer		vmbuffer = InvalidBuffer;
 	const int	initprog_index[] = {
 		PROGRESS_VACUUM_PHASE,
 		PROGRESS_VACUUM_TOTAL_HEAP_BLKS,
@@ -834,10 +832,9 @@ lazy_scan_heap(LVRelState *vacrel)
 	pgstat_progress_update_multi_param(3, initprog_index, initprog_val);
 
 	vacrel->skip.next_unskippable_block = InvalidBlockNumber;
-	vacrel->skip.vmbuffer = InvalidBuffer;
 
 	while (heap_vac_scan_get_next_block(vacrel, blkno + 1,
-										&blkno, &all_visible_according_to_vm))
+										&blkno, &all_visible_according_to_vm, &vmbuffer))
 	{
 		Buffer		buf;
 		Page		page;
@@ -880,10 +877,10 @@ lazy_scan_heap(LVRelState *vacrel)
 			 * correctness, but we do it anyway to avoid holding the pin
 			 * across a lengthy, unrelated operation.
 			 */
-			if (BufferIsValid(vacrel->skip.vmbuffer))
+			if (BufferIsValid(vmbuffer))
 			{
-				ReleaseBuffer(vacrel->skip.vmbuffer);
-				vacrel->skip.vmbuffer = InvalidBuffer;
+				ReleaseBuffer(vmbuffer);
+				vmbuffer = InvalidBuffer;
 			}
 
 			/* Perform a round of index and heap vacuuming */
@@ -908,7 +905,7 @@ lazy_scan_heap(LVRelState *vacrel)
 		 * all-visible.  In most cases this will be very cheap, because we'll
 		 * already have the correct page pinned anyway.
 		 */
-		visibilitymap_pin(vacrel->rel, blkno, &vacrel->skip.vmbuffer);
+		visibilitymap_pin(vacrel->rel, blkno, &vmbuffer);
 
 		buf = ReadBufferExtended(vacrel->rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
 								 vacrel->bstrategy);
@@ -926,7 +923,8 @@ lazy_scan_heap(LVRelState *vacrel)
 			LockBuffer(buf, BUFFER_LOCK_SHARE);
 
 		/* Check for new or empty pages before lazy_scan_[no]prune call */
-		if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, !got_cleanup_lock))
+		if (lazy_scan_new_or_empty(vacrel, buf, blkno, page, !got_cleanup_lock,
+								   vmbuffer))
 		{
 			/* Processed as new/empty page (lock and pin released) */
 			continue;
@@ -968,7 +966,7 @@ lazy_scan_heap(LVRelState *vacrel)
 		 */
 		if (got_cleanup_lock)
 			lazy_scan_prune(vacrel, buf, blkno, page,
-							all_visible_according_to_vm,
+							vmbuffer, all_visible_according_to_vm,
 							&has_lpdead_items);
 
 		/*
@@ -1018,11 +1016,8 @@ lazy_scan_heap(LVRelState *vacrel)
 	}
 
 	vacrel->blkno = InvalidBlockNumber;
-	if (BufferIsValid(vacrel->skip.vmbuffer))
-	{
-		ReleaseBuffer(vacrel->skip.vmbuffer);
-		vacrel->skip.vmbuffer = InvalidBuffer;
-	}
+	if (BufferIsValid(vmbuffer))
+		ReleaseBuffer(vmbuffer);
 
 	/* report that everything is now scanned */
 	pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
@@ -1094,7 +1089,7 @@ lazy_scan_heap(LVRelState *vacrel)
  * relation are read and vacrel->skippedallvis is set to ensure we don't
  * advance relfrozenxid when we have skipped vacuuming all visible blocks.
  *
- * skip->vmbuffer will contain the block from the VM containing visibility
+ * vmbuffer will contain the block from the VM containing visibility
  * information for the next unskippable heap block. We may end up needed a
  * different block from the VM (if we decide not to skip a skippable block).
  * This is okay; visibilitymap_pin() will take care of this while processing
@@ -1110,7 +1105,7 @@ lazy_scan_heap(LVRelState *vacrel)
  */
 static bool
 heap_vac_scan_get_next_block(LVRelState *vacrel, BlockNumber next_block,
-							 BlockNumber *blkno, bool *all_visible_according_to_vm)
+							 BlockNumber *blkno, bool *all_visible_according_to_vm, Buffer *vmbuffer)
 {
 	bool		skipsallvis = false;
 
@@ -1131,7 +1126,7 @@ heap_vac_scan_get_next_block(LVRelState *vacrel, BlockNumber next_block,
 		{
 			uint8		mapbits = visibilitymap_get_status(vacrel->rel,
 														   next_unskippable_block,
-														   &vacrel->skip.vmbuffer);
+														   vmbuffer);
 
 			vacrel->skip.next_unskippable_allvis = mapbits & VISIBILITYMAP_ALL_VISIBLE;
 
@@ -1245,7 +1240,7 @@ heap_vac_scan_get_next_block(LVRelState *vacrel, BlockNumber next_block,
  */
 static bool
 lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
-					   Page page, bool sharelock)
+					   Page page, bool sharelock, Buffer vmbuffer)
 {
 	Size		freespace;
 
@@ -1331,7 +1326,7 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
 
 			PageSetAllVisible(page);
 			visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
-							  vacrel->skip.vmbuffer, InvalidTransactionId,
+							  vmbuffer, InvalidTransactionId,
 							  VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
 			END_CRIT_SECTION();
 		}
@@ -1367,11 +1362,10 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
  * any tuple that becomes dead after the call to heap_page_prune() can't need to
  * be frozen, because it was visible to another session when vacuum started.
  *
- * vacrel->skipstate.vmbuffer is the buffer containing the VM block with
- * visibility information for the heap block, blkno.
- * all_visible_according_to_vm is the saved visibility status of the heap block
- * looked up earlier by the caller. We won't rely entirely on this status, as
- * it may be out of date.
+ * vmbuffer is the buffer containing the VM block with visibility information
+ * for the heap block, blkno. all_visible_according_to_vm is the saved
+ * visibility status of the heap block looked up earlier by the caller. We
+ * won't rely entirely on this status, as it may be out of date.
  *
  * *has_lpdead_items is set to true or false depending on whether, upon return
  * from this function, any LP_DEAD items are still present on the page.
@@ -1381,6 +1375,7 @@ lazy_scan_prune(LVRelState *vacrel,
 				Buffer buf,
 				BlockNumber blkno,
 				Page page,
+				Buffer vmbuffer,
 				bool all_visible_according_to_vm,
 				bool *has_lpdead_items)
 {
@@ -1814,8 +1809,7 @@ lazy_scan_prune(LVRelState *vacrel,
 		PageSetAllVisible(page);
 		MarkBufferDirty(buf);
 		visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
-						  vacrel->skip.vmbuffer, visibility_cutoff_xid,
-						  flags);
+						  vmbuffer, visibility_cutoff_xid, flags);
 	}
 
 	/*
@@ -1825,11 +1819,11 @@ lazy_scan_prune(LVRelState *vacrel,
 	 * buffer lock before concluding that the VM is corrupt.
 	 */
 	else if (all_visible_according_to_vm && !PageIsAllVisible(page) &&
-			 visibilitymap_get_status(vacrel->rel, blkno, &vacrel->skip.vmbuffer) != 0)
+			 visibilitymap_get_status(vacrel->rel, blkno, &vmbuffer) != 0)
 	{
 		elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
 			 vacrel->relname, blkno);
-		visibilitymap_clear(vacrel->rel, blkno, vacrel->skip.vmbuffer,
+		visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
 							VISIBILITYMAP_VALID_BITS);
 	}
 
@@ -1853,7 +1847,7 @@ lazy_scan_prune(LVRelState *vacrel,
 			 vacrel->relname, blkno);
 		PageClearAllVisible(page);
 		MarkBufferDirty(buf);
-		visibilitymap_clear(vacrel->rel, blkno, vacrel->skip.vmbuffer,
+		visibilitymap_clear(vacrel->rel, blkno, vmbuffer,
 							VISIBILITYMAP_VALID_BITS);
 	}
 
@@ -1863,7 +1857,7 @@ lazy_scan_prune(LVRelState *vacrel,
 	 * true, so we must check both all_visible and all_frozen.
 	 */
 	else if (all_visible_according_to_vm && all_visible &&
-			 all_frozen && !VM_ALL_FROZEN(vacrel->rel, blkno, &vacrel->skip.vmbuffer))
+			 all_frozen && !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
 	{
 		/*
 		 * Avoid relying on all_visible_according_to_vm as a proxy for the
@@ -1885,7 +1879,7 @@ lazy_scan_prune(LVRelState *vacrel,
 		 */
 		Assert(!TransactionIdIsValid(visibility_cutoff_xid));
 		visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
-						  vacrel->skip.vmbuffer, InvalidTransactionId,
+						  vmbuffer, InvalidTransactionId,
 						  VISIBILITYMAP_ALL_VISIBLE |
 						  VISIBILITYMAP_ALL_FROZEN);
 	}
-- 
2.39.2

