From 3c208039193ce94111e8ddc1b03828cf820e11e3 Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplageman@gmail.com>
Date: Tue, 19 Mar 2024 19:30:16 -0400
Subject: [PATCH v4 08/19] lazy_scan_prune reorder freeze execution logic

To combine the prune and freeze records, freezing must be done before a
pruning WAL record is emitted. We will move the freeze execution into
heap_page_prune() in future commits. lazy_scan_prune() currently
executes freezing, updates vacrel->NewRelfrozenXid and
vacrel->NewRelminMxid, and resets the snapshotConflictHorizon that the
visibility map update record may use, all within the same block of if
statements.

This commit starts reordering that logic so that the freeze execution
can be separated from the other updates, which should not be done
during pruning.
---
 src/backend/access/heap/vacuumlazy.c | 92 +++++++++++++++-------------
 1 file changed, 49 insertions(+), 43 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 4187c998d25..74ebab25a95 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -1421,6 +1421,7 @@ lazy_scan_prune(LVRelState *vacrel,
 				recently_dead_tuples;
 	HeapPageFreeze pagefrz;
 	bool		hastup = false;
+	bool		do_freeze;
 	int64		fpi_before = pgWalUsage.wal_fpi;
 	OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
 
@@ -1580,10 +1581,15 @@ lazy_scan_prune(LVRelState *vacrel,
 	 * freeze when pruning generated an FPI, if doing so means that we set the
 	 * page all-frozen afterwards (might not happen until final heap pass).
 	 */
-	if (pagefrz.freeze_required || presult.nfrozen == 0 ||
+	do_freeze = pagefrz.freeze_required ||
 		(presult.all_visible_except_removable && presult.all_frozen &&
-		 fpi_before != pgWalUsage.wal_fpi))
+		 presult.nfrozen > 0 &&
+		 fpi_before != pgWalUsage.wal_fpi);
+
+	if (do_freeze)
 	{
+		TransactionId snapshotConflictHorizon;
+
 		/*
 		 * We're freezing the page.  Our final NewRelfrozenXid doesn't need to
 		 * be affected by the XIDs that are just about to be frozen anyway.
@@ -1591,52 +1597,52 @@ lazy_scan_prune(LVRelState *vacrel,
 		vacrel->NewRelfrozenXid = pagefrz.FreezePageRelfrozenXid;
 		vacrel->NewRelminMxid = pagefrz.FreezePageRelminMxid;
 
-		if (presult.nfrozen == 0)
-		{
-			/*
-			 * We have no freeze plans to execute, so there's no added cost
-			 * from following the freeze path.  That's why it was chosen. This
-			 * is important in the case where the page only contains totally
-			 * frozen tuples at this point (perhaps only following pruning).
-			 * Such pages can be marked all-frozen in the VM by our caller,
-			 * even though none of its tuples were newly frozen here (note
-			 * that the "no freeze" path never sets pages all-frozen).
-			 *
-			 * We never increment the frozen_pages instrumentation counter
-			 * here, since it only counts pages with newly frozen tuples
-			 * (don't confuse that with pages newly set all-frozen in VM).
-			 */
-		}
+		vacrel->frozen_pages++;
+
+		/*
+		 * We can use frz_conflict_horizon as our cutoff for conflicts when
+		 * the whole page is eligible to become all-frozen in the VM once
+		 * we're done with it.  Otherwise we generate a conservative cutoff by
+		 * stepping back from OldestXmin.
+		 */
+		if (presult.all_visible_except_removable && presult.all_frozen)
+			snapshotConflictHorizon = presult.frz_conflict_horizon;
 		else
 		{
-			TransactionId snapshotConflictHorizon;
+			/* Avoids false conflicts when hot_standby_feedback in use */
+			snapshotConflictHorizon = pagefrz.cutoffs->OldestXmin;
+			TransactionIdRetreat(snapshotConflictHorizon);
+		}
 
-			vacrel->frozen_pages++;
+		/* Using same cutoff when setting VM is now unnecessary */
+		if (presult.all_visible_except_removable && presult.all_frozen)
+			presult.frz_conflict_horizon = InvalidTransactionId;
 
-			/*
-			 * We can use frz_conflict_horizon as our cutoff for conflicts
-			 * when the whole page is eligible to become all-frozen in the VM
-			 * once we're done with it.  Otherwise we generate a conservative
-			 * cutoff by stepping back from OldestXmin.
-			 */
-			if (presult.all_visible_except_removable && presult.all_frozen)
-			{
-				/* Using same cutoff when setting VM is now unnecessary */
-				snapshotConflictHorizon = presult.frz_conflict_horizon;
-				presult.frz_conflict_horizon = InvalidTransactionId;
-			}
-			else
-			{
-				/* Avoids false conflicts when hot_standby_feedback in use */
-				snapshotConflictHorizon = vacrel->cutoffs.OldestXmin;
-				TransactionIdRetreat(snapshotConflictHorizon);
-			}
+		/* Execute all freeze plans for page as a single atomic action */
+		heap_freeze_execute_prepared(vacrel->rel, buf,
+									 snapshotConflictHorizon,
+									 presult.frozen, presult.nfrozen);
+	}
+	else if (presult.all_frozen && presult.nfrozen == 0)
+	{
+		/* Page should be all visible except to-be-removed tuples */
+		Assert(presult.all_visible_except_removable);
 
-			/* Execute all freeze plans for page as a single atomic action */
-			heap_freeze_execute_prepared(vacrel->rel, buf,
-										 snapshotConflictHorizon,
-										 presult.frozen, presult.nfrozen);
-		}
+		/*
+		 * We have no freeze plans to execute, so there's no added cost from
+		 * following the freeze path.  That's why it was chosen. This is
+		 * important in the case where the page only contains totally frozen
+		 * tuples at this point (perhaps only following pruning). Such pages
+		 * can be marked all-frozen in the VM by our caller, even though none
+		 * of its tuples were newly frozen here (note that the "no freeze"
+		 * path never sets pages all-frozen).
+		 *
+		 * We never increment the frozen_pages instrumentation counter here,
+		 * since it only counts pages with newly frozen tuples (don't confuse
+		 * that with pages newly set all-frozen in VM).
+		 */
+		vacrel->NewRelfrozenXid = pagefrz.FreezePageRelfrozenXid;
+		vacrel->NewRelminMxid = pagefrz.FreezePageRelminMxid;
 	}
 	else
 	{
-- 
2.40.1

