From 674b2272f9a40310dd01860a221d7221563571c6 Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplageman@gmail.com>
Date: Mon, 25 Mar 2024 20:32:11 -0400
Subject: [PATCH v7 08/16] Execute freezing in heap_page_prune()

As a step toward combining the prune and freeze WAL records, execute
freezing in heap_page_prune(). The logic to determine whether or not to
execute freeze plans was moved from lazy_scan_prune() over to
heap_page_prune() with little modification.
---
 src/backend/access/heap/heapam_handler.c |   2 +-
 src/backend/access/heap/pruneheap.c      | 189 ++++++++++++++++++-----
 src/backend/access/heap/vacuumlazy.c     | 150 +++++-------------
 src/backend/storage/ipc/procarray.c      |   6 +-
 src/include/access/heapam.h              |  53 ++++---
 src/tools/pgindent/typedefs.list         |   2 +-
 6 files changed, 225 insertions(+), 177 deletions(-)

diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index 2b7c7026429..4802d789963 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -1048,7 +1048,7 @@ heapam_scan_analyze_next_tuple(TableScanDesc scan, TransactionId OldestXmin,
 		 * We ignore unused and redirect line pointers.  DEAD line pointers
 		 * should be counted as dead, because we need vacuum to run to get rid
 		 * of them.  Note that this rule agrees with the way that
-		 * heap_page_prune() counts things.
+		 * heap_page_prune_and_freeze() counts things.
 		 */
 		if (!ItemIdIsNormal(itemid))
 		{
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 457650ab651..e009c7579dd 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -17,16 +17,19 @@
 #include "access/heapam.h"
 #include "access/heapam_xlog.h"
 #include "access/htup_details.h"
+#include "access/multixact.h"
 #include "access/transam.h"
 #include "access/xlog.h"
+#include "commands/vacuum.h"
 #include "access/xloginsert.h"
+#include "executor/instrument.h"
 #include "miscadmin.h"
 #include "pgstat.h"
 #include "storage/bufmgr.h"
 #include "utils/rel.h"
 #include "utils/snapmgr.h"
 
-/* Working data for heap_page_prune and subroutines */
+/* Working data for heap_page_prune_and_freeze() and subroutines */
 typedef struct
 {
 	/* tuple visibility test, initialized for the relation */
@@ -51,6 +54,11 @@ typedef struct
 	 * 1. Otherwise every access would need to subtract 1.
 	 */
 	bool		marked[MaxHeapTuplesPerPage + 1];
+
+	/*
+	 * One entry for every tuple that we may freeze.
+	 */
+	HeapTupleFreeze frozen[MaxHeapTuplesPerPage];
 } PruneState;
 
 /* Local functions */
@@ -59,14 +67,15 @@ static HTSV_Result heap_prune_satisfies_vacuum(PruneState *prstate,
 											   Buffer buffer);
 static int	heap_prune_chain(Buffer buffer,
 							 OffsetNumber rootoffnum,
-							 PruneState *prstate, PruneResult *presult);
+							 PruneState *prstate, PruneFreezeResult *presult);
+
 static void heap_prune_record_prunable(PruneState *prstate, TransactionId xid);
 static void heap_prune_record_redirect(PruneState *prstate,
 									   OffsetNumber offnum, OffsetNumber rdoffnum);
 static void heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum,
-								   PruneResult *presult);
+								   PruneFreezeResult *presult);
 static void heap_prune_record_dead_or_unused(PruneState *prstate, OffsetNumber offnum,
-											 PruneResult *presult);
+											 PruneFreezeResult *presult);
 static void heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum);
 static void page_verify_redirects(Page page);
 
@@ -146,15 +155,15 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
 		 */
 		if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
 		{
-			PruneResult presult;
+			PruneFreezeResult presult;
 
 			/*
 			 * For now, pass mark_unused_now as false regardless of whether or
 			 * not the relation has indexes, since we cannot safely determine
 			 * that during on-access pruning with the current implementation.
 			 */
-			heap_page_prune(relation, buffer, vistest, false, NULL,
-							&presult, PRUNE_ON_ACCESS, NULL);
+			heap_page_prune_and_freeze(relation, buffer, vistest, false, NULL,
+									   &presult, PRUNE_ON_ACCESS, NULL);
 
 			/*
 			 * Report the number of tuples reclaimed to pgstats.  This is
@@ -188,7 +197,12 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
 
 
 /*
- * Prune and repair fragmentation in the specified page.
+ * Prune and repair fragmentation and potentially freeze tuples on the
+ * specified page.
+ *
+ * If the page can be marked all-frozen in the visibility map, we may
+ * opportunistically freeze tuples on the page if either its tuples are old
+ * enough or freezing will be cheap enough.
  *
  * Caller must have pin and buffer cleanup lock on the page.  Note that we
  * don't update the FSM information for page on caller's behalf.  Caller might
@@ -201,12 +215,13 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
  * mark_unused_now indicates whether or not dead items can be set LP_UNUSED
  * during pruning.
  *
- * pagefrz contains both input and output parameters used if the caller is
- * interested in potentially freezing tuples on the page.
+ * pagefrz is an input parameter containing visibility cutoff information and
+ * the current relfrozenxid and relminmxid values, used if the caller is
+ * interested in freezing tuples on the page.
  *
  * presult contains output parameters needed by callers such as the number of
  * tuples removed and the number of line pointers newly marked LP_DEAD.
- * heap_page_prune() is responsible for initializing it.
+ * heap_page_prune_and_freeze() is responsible for initializing it.
  *
  * reason indicates why the pruning is performed.  It is included in the WAL
  * record for debugging and analysis purposes, but otherwise has no effect.
@@ -215,13 +230,13 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
  * callback.
  */
 void
-heap_page_prune(Relation relation, Buffer buffer,
-				GlobalVisState *vistest,
-				bool mark_unused_now,
-				HeapPageFreeze *pagefrz,
-				PruneResult *presult,
-				PruneReason reason,
-				OffsetNumber *off_loc)
+heap_page_prune_and_freeze(Relation relation, Buffer buffer,
+						   GlobalVisState *vistest,
+						   bool mark_unused_now,
+						   HeapPageFreeze *pagefrz,
+						   PruneFreezeResult *presult,
+						   PruneReason reason,
+						   OffsetNumber *off_loc)
 {
 	Page		page = BufferGetPage(buffer);
 	BlockNumber blockno = BufferGetBlockNumber(buffer);
@@ -229,6 +244,10 @@ heap_page_prune(Relation relation, Buffer buffer,
 				maxoff;
 	PruneState	prstate;
 	HeapTupleData tup;
+	TransactionId visibility_cutoff_xid;
+	bool		do_freeze;
+	bool		all_visible_except_removable;
+	int64		fpi_before = pgWalUsage.wal_fpi;
 
 	/*
 	 * Our strategy is to scan the page and make lists of items to change,
@@ -264,9 +283,20 @@ heap_page_prune(Relation relation, Buffer buffer,
 	 * all_visible is also set to true.
 	 */
 	presult->all_frozen = true;
-	presult->all_visible = true;
-	/* for recovery conflicts */
-	presult->visibility_cutoff_xid = InvalidTransactionId;
+
+	/*
+	 * The visibility cutoff xid is the newest xmin of live tuples on the
+	 * page. In the common case, this will be set as the conflict horizon the
+	 * caller can use for updating the VM. If, at the end of freezing and
+	 * pruning, the page is all-frozen, every running transaction on the
+	 * standby must already see the tuples on the page as all-visible, so
+	 * the conflict horizon remains InvalidTransactionId.
+	 */
+	presult->vm_conflict_horizon = visibility_cutoff_xid = InvalidTransactionId;
+
+	/* For advancing relfrozenxid and relminmxid */
+	presult->new_relfrozenxid = InvalidTransactionId;
+	presult->new_relminmxid = InvalidMultiXactId;
 
 	maxoff = PageGetMaxOffsetNumber(page);
 	tup.t_tableOid = RelationGetRelid(relation);
@@ -291,6 +321,7 @@ heap_page_prune(Relation relation, Buffer buffer,
 	 * prefetching efficiency significantly / decreases the number of cache
 	 * misses.
 	 */
+	all_visible_except_removable = true;
 	for (offnum = maxoff;
 		 offnum >= FirstOffsetNumber;
 		 offnum = OffsetNumberPrev(offnum))
@@ -351,13 +382,13 @@ heap_page_prune(Relation relation, Buffer buffer,
 				 * asynchronously. See SetHintBits for more info. Check that
 				 * the tuple is hinted xmin-committed because of that.
 				 */
-				if (presult->all_visible)
+				if (all_visible_except_removable)
 				{
 					TransactionId xmin;
 
 					if (!HeapTupleHeaderXminCommitted(htup))
 					{
-						presult->all_visible = false;
+						all_visible_except_removable = false;
 						break;
 					}
 
@@ -373,25 +404,25 @@ heap_page_prune(Relation relation, Buffer buffer,
 					if (xmin != FrozenTransactionId &&
 						!GlobalVisTestIsRemovableXid(vistest, xmin))
 					{
-						presult->all_visible = false;
+						all_visible_except_removable = false;
 						break;
 					}
 
 					/* Track newest xmin on page. */
-					if (TransactionIdFollows(xmin, presult->visibility_cutoff_xid) &&
+					if (TransactionIdFollows(xmin, visibility_cutoff_xid) &&
 						TransactionIdIsNormal(xmin))
-						presult->visibility_cutoff_xid = xmin;
+						visibility_cutoff_xid = xmin;
 				}
 				break;
 			case HEAPTUPLE_RECENTLY_DEAD:
-				presult->all_visible = false;
+				all_visible_except_removable = false;
 				break;
 			case HEAPTUPLE_INSERT_IN_PROGRESS:
-				presult->all_visible = false;
+				all_visible_except_removable = false;
 				break;
 			case HEAPTUPLE_DELETE_IN_PROGRESS:
 				/* This is an expected case during concurrent vacuum */
-				presult->all_visible = false;
+				all_visible_except_removable = false;
 				break;
 			default:
 				elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
@@ -407,11 +438,11 @@ heap_page_prune(Relation relation, Buffer buffer,
 
 			/* Tuple with storage -- consider need to freeze */
 			if ((heap_prepare_freeze_tuple(htup, pagefrz,
-										   &presult->frozen[presult->nfrozen],
+										   &prstate.frozen[presult->nfrozen],
 										   &totally_frozen)))
 			{
 				/* Save prepared freeze plan for later */
-				presult->frozen[presult->nfrozen++].offset = offnum;
+				prstate.frozen[presult->nfrozen++].offset = offnum;
 			}
 
 			/*
@@ -438,7 +469,7 @@ heap_page_prune(Relation relation, Buffer buffer,
 	 * pruning and keep all_visible_except_removable to permit freezing if the
 	 * whole page will eventually become all visible after removing tuples.
 	 */
-	presult->all_visible_except_removable = presult->all_visible;
+	presult->all_visible = all_visible_except_removable;
 
 	/* Scan the page */
 	for (offnum = FirstOffsetNumber;
@@ -537,6 +568,86 @@ heap_page_prune(Relation relation, Buffer buffer,
 
 	/* Record number of newly-set-LP_DEAD items for caller */
 	presult->nnewlpdead = prstate.ndead;
+
+	/*
+	 * Freeze the page when heap_prepare_freeze_tuple indicates that at least
+	 * one XID/MXID from before FreezeLimit/MultiXactCutoff is present.  Also
+	 * freeze when pruning generated an FPI, if doing so means that we set the
+	 * page all-frozen afterwards (might not happen until final heap pass).
+	 */
+	if (pagefrz)
+		do_freeze = pagefrz->freeze_required ||
+			(all_visible_except_removable && presult->all_frozen &&
+			 presult->nfrozen > 0 &&
+			 fpi_before != pgWalUsage.wal_fpi);
+	else
+		do_freeze = false;
+
+	if (do_freeze)
+	{
+		TransactionId frz_conflict_horizon = InvalidTransactionId;
+
+		/*
+		 * We can use the visibility_cutoff_xid as our cutoff for conflicts
+		 * when the whole page is eligible to become all-frozen in the VM once
+		 * we're done with it.  Otherwise we generate a conservative cutoff by
+		 * stepping back from OldestXmin. This avoids false conflicts when
+		 * hot_standby_feedback is in use.
+		 */
+		if (all_visible_except_removable && presult->all_frozen)
+			frz_conflict_horizon = visibility_cutoff_xid;
+		else
+		{
+			/* Avoids false conflicts when hot_standby_feedback in use */
+			frz_conflict_horizon = pagefrz->cutoffs->OldestXmin;
+			TransactionIdRetreat(frz_conflict_horizon);
+		}
+
+		/* Execute all freeze plans for page as a single atomic action */
+		heap_freeze_execute_prepared(relation, buffer,
+									 frz_conflict_horizon,
+									 prstate.frozen, presult->nfrozen);
+	}
+	else if (!pagefrz || !presult->all_frozen || presult->nfrozen > 0)
+	{
+		/*
+		 * If we will neither freeze tuples on the page nor set the page all
+		 * frozen in the visibility map, the page is not all frozen and there
+		 * will be no newly frozen tuples.
+		 */
+		presult->all_frozen = false;
+		presult->nfrozen = 0;	/* avoid miscounts in instrumentation */
+	}
+
+	/*
+	 * For callers planning to update the visibility map, the conflict horizon
+	 * for that record must be the newest xmin on the page. However, if the
+	 * page is completely frozen, there can be no conflict and the
+	 * vm_conflict_horizon should remain InvalidTransactionId.
+	 */
+	if (!presult->all_frozen)
+		presult->vm_conflict_horizon = visibility_cutoff_xid;
+
+	if (pagefrz)
+	{
+		/*
+		 * If we will freeze tuples on the page or, even if we don't freeze
+		 * tuples on the page, if we will set the page all-frozen in the
+		 * visibility map, we can advance relfrozenxid and relminmxid to the
+		 * values in pagefrz->FreezePageRelfrozenXid and
+		 * pagefrz->FreezePageRelminMxid.
+		 */
+		if (presult->all_frozen || presult->nfrozen > 0)
+		{
+			presult->new_relfrozenxid = pagefrz->FreezePageRelfrozenXid;
+			presult->new_relminmxid = pagefrz->FreezePageRelminMxid;
+		}
+		else
+		{
+			presult->new_relfrozenxid = pagefrz->NoFreezePageRelfrozenXid;
+			presult->new_relminmxid = pagefrz->NoFreezePageRelminMxid;
+		}
+	}
 }
 
 
@@ -594,7 +705,7 @@ heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup, Buffer buffer)
  */
 static int
 heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum,
-				 PruneState *prstate, PruneResult *presult)
+				 PruneState *prstate, PruneFreezeResult *presult)
 {
 	int			ndeleted = 0;
 	Page		dp = (Page) BufferGetPage(buffer);
@@ -859,10 +970,10 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum,
 	{
 		/*
 		 * We found a redirect item that doesn't point to a valid follow-on
-		 * item.  This can happen if the loop in heap_page_prune caused us to
-		 * visit the dead successor of a redirect item before visiting the
-		 * redirect item.  We can clean up by setting the redirect item to
-		 * DEAD state or LP_UNUSED if the caller indicated.
+		 * item.  This can happen if the loop in heap_page_prune_and_freeze()
+		 * caused us to visit the dead successor of a redirect item before
+		 * visiting the redirect item.  We can clean up by setting the
+		 * redirect item to DEAD state or LP_UNUSED if the caller indicated.
 		 */
 		heap_prune_record_dead_or_unused(prstate, rootoffnum, presult);
 	}
@@ -902,7 +1013,7 @@ heap_prune_record_redirect(PruneState *prstate,
 /* Record line pointer to be marked dead */
 static void
 heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum,
-					   PruneResult *presult)
+					   PruneFreezeResult *presult)
 {
 	Assert(prstate->ndead < MaxHeapTuplesPerPage);
 	prstate->nowdead[prstate->ndead] = offnum;
@@ -925,7 +1036,7 @@ heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum,
  */
 static void
 heap_prune_record_dead_or_unused(PruneState *prstate, OffsetNumber offnum,
-								 PruneResult *presult)
+								 PruneFreezeResult *presult)
 {
 	/*
 	 * If the caller set mark_unused_now to true, we can remove dead tuples
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index f474e661428..8beef4093ae 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -430,12 +430,12 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 	 * as an upper bound on the XIDs stored in the pages we'll actually scan
 	 * (NewRelfrozenXid tracking must never be allowed to miss unfrozen XIDs).
 	 *
-	 * Next acquire vistest, a related cutoff that's used in heap_page_prune.
-	 * We expect vistest will always make heap_page_prune remove any deleted
-	 * tuple whose xmax is < OldestXmin.  lazy_scan_prune must never become
-	 * confused about whether a tuple should be frozen or removed.  (In the
-	 * future we might want to teach lazy_scan_prune to recompute vistest from
-	 * time to time, to increase the number of dead tuples it can prune away.)
+	 * Next acquire vistest, a related cutoff that's used in
+	 * heap_page_prune_and_freeze(). We expect vistest will always make
+	 * heap_page_prune_and_freeze() remove any deleted tuple whose xmax is <
+	 * OldestXmin. (In the future we might want to teach lazy_scan_prune to
+	 * recompute vistest from time to time, to increase the number of dead
+	 * tuples it can prune away.)
 	 */
 	vacrel->aggressive = vacuum_get_cutoffs(rel, params, &vacrel->cutoffs);
 	vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
@@ -1378,21 +1378,21 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
  *
  * Caller must hold pin and buffer cleanup lock on the buffer.
  *
- * Prior to PostgreSQL 14 there were very rare cases where heap_page_prune()
- * was allowed to disagree with our HeapTupleSatisfiesVacuum() call about
- * whether or not a tuple should be considered DEAD.  This happened when an
- * inserting transaction concurrently aborted (after our heap_page_prune()
- * call, before our HeapTupleSatisfiesVacuum() call).  There was rather a lot
- * of complexity just so we could deal with tuples that were DEAD to VACUUM,
- * but nevertheless were left with storage after pruning.
+ * Prior to PostgreSQL 14 there were very rare cases where
+ * heap_page_prune_and_freeze() was allowed to disagree with our
+ * HeapTupleSatisfiesVacuum() call about whether or not a tuple should be
+ * considered DEAD.  This happened when an inserting transaction concurrently
+ * aborted (after our heap_page_prune_and_freeze() call, before our
+ * HeapTupleSatisfiesVacuum() call).  There was rather a lot of complexity just
+ * so we could deal with tuples that were DEAD to VACUUM, but nevertheless were
+ * left with storage after pruning.
  *
  * As of Postgres 17, we circumvent this problem altogether by reusing the
- * result of heap_page_prune()'s visibility check. Without the second call to
- * HeapTupleSatisfiesVacuum(), there is no new HTSV_Result and there can be no
- * disagreement. We'll just handle such tuples as if they had become fully dead
- * right after this operation completes instead of in the middle of it. Note that
- * any tuple that becomes dead after the call to heap_page_prune() can't need to
- * be frozen, because it was visible to another session when vacuum started.
+ * result of heap_page_prune_and_freeze()'s visibility check. Without the
+ * second call to HeapTupleSatisfiesVacuum(), there is no new HTSV_Result and
+ * there can be no disagreement. We'll just handle such tuples as if they had
+ * become fully dead right after this operation completes instead of in the
+ * middle of it.
  *
  * vmbuffer is the buffer containing the VM block with visibility information
  * for the heap block, blkno. all_visible_according_to_vm is the saved
@@ -1415,26 +1415,24 @@ lazy_scan_prune(LVRelState *vacrel,
 	OffsetNumber offnum,
 				maxoff;
 	ItemId		itemid;
-	PruneResult presult;
+	PruneFreezeResult presult;
 	int			lpdead_items,
 				live_tuples,
 				recently_dead_tuples;
 	HeapPageFreeze pagefrz;
 	bool		hastup = false;
-	bool		do_freeze;
-	int64		fpi_before = pgWalUsage.wal_fpi;
 	OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
 
 	Assert(BufferGetBlockNumber(buf) == blkno);
 
 	/*
 	 * maxoff might be reduced following line pointer array truncation in
-	 * heap_page_prune.  That's safe for us to ignore, since the reclaimed
-	 * space will continue to look like LP_UNUSED items below.
+	 * heap_page_prune_and_freeze().  That's safe for us to ignore, since the
+	 * reclaimed space will continue to look like LP_UNUSED items below.
 	 */
 	maxoff = PageGetMaxOffsetNumber(page);
 
-	/* Initialize (or reset) page-level state */
+	/* Initialize pagefrz */
 	pagefrz.freeze_required = false;
 	pagefrz.FreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
 	pagefrz.FreezePageRelminMxid = vacrel->NewRelminMxid;
@@ -1446,7 +1444,7 @@ lazy_scan_prune(LVRelState *vacrel,
 	recently_dead_tuples = 0;
 
 	/*
-	 * Prune all HOT-update chains in this page.
+	 * Prune all HOT-update chains and potentially freeze tuples on this page.
 	 *
 	 * We count the number of tuples removed from the page by the pruning step
 	 * in presult.ndeleted. It should not be confused with lpdead_items;
@@ -1457,8 +1455,8 @@ lazy_scan_prune(LVRelState *vacrel,
 	 * items LP_UNUSED, so mark_unused_now should be true if no indexes and
 	 * false otherwise.
 	 */
-	heap_page_prune(rel, buf, vacrel->vistest, vacrel->nindexes == 0, &pagefrz,
-					&presult, PRUNE_VACUUM_SCAN, &vacrel->offnum);
+	heap_page_prune_and_freeze(rel, buf, vacrel->vistest, vacrel->nindexes == 0,
+							   &pagefrz, &presult, PRUNE_VACUUM_SCAN, &vacrel->offnum);
 
 	/*
 	 * Now scan the page to collect LP_DEAD items and update the variables set
@@ -1571,86 +1569,20 @@ lazy_scan_prune(LVRelState *vacrel,
 
 	vacrel->offnum = InvalidOffsetNumber;
 
-	/*
-	 * Freeze the page when heap_prepare_freeze_tuple indicates that at least
-	 * one XID/MXID from before FreezeLimit/MultiXactCutoff is present.  Also
-	 * freeze when pruning generated an FPI, if doing so means that we set the
-	 * page all-frozen afterwards (might not happen until final heap pass).
-	 */
-	do_freeze = pagefrz.freeze_required ||
-		(presult.all_visible_except_removable && presult.all_frozen &&
-		 presult.nfrozen > 0 &&
-		 fpi_before != pgWalUsage.wal_fpi);
+	Assert(MultiXactIdIsValid(presult.new_relminmxid));
+	vacrel->NewRelfrozenXid = presult.new_relfrozenxid;
+	Assert(TransactionIdIsValid(presult.new_relfrozenxid));
+	vacrel->NewRelminMxid = presult.new_relminmxid;
 
-	if (do_freeze)
+	if (presult.nfrozen > 0)
 	{
-		TransactionId snapshotConflictHorizon;
-
 		/*
-		 * We're freezing the page.  Our final NewRelfrozenXid doesn't need to
-		 * be affected by the XIDs that are just about to be frozen anyway.
+		 * We never increment the frozen_pages instrumentation counter when
+		 * nfrozen == 0, since it only counts pages with newly frozen tuples
+		 * (don't confuse that with pages newly set all-frozen in VM).
 		 */
-		vacrel->NewRelfrozenXid = pagefrz.FreezePageRelfrozenXid;
-		vacrel->NewRelminMxid = pagefrz.FreezePageRelminMxid;
-
 		vacrel->frozen_pages++;
 
-		/*
-		 * We can use frz_conflict_horizon as our cutoff for conflicts when
-		 * the whole page is eligible to become all-frozen in the VM once
-		 * we're done with it.  Otherwise we generate a conservative cutoff by
-		 * stepping back from OldestXmin.
-		 */
-		if (presult.all_visible_except_removable && presult.all_frozen)
-			snapshotConflictHorizon = presult.visibility_cutoff_xid;
-		else
-		{
-			/* Avoids false conflicts when hot_standby_feedback in use */
-			snapshotConflictHorizon = pagefrz.cutoffs->OldestXmin;
-			TransactionIdRetreat(snapshotConflictHorizon);
-		}
-
-		/* Using same cutoff when setting VM is now unnecessary */
-		if (presult.all_visible_except_removable && presult.all_frozen)
-			presult.visibility_cutoff_xid = InvalidTransactionId;
-
-		/* Execute all freeze plans for page as a single atomic action */
-		heap_freeze_execute_prepared(vacrel->rel, buf,
-									 snapshotConflictHorizon,
-									 presult.frozen, presult.nfrozen);
-
-	}
-	else if (presult.all_frozen && presult.nfrozen == 0)
-	{
-		/* Page should be all visible except to-be-removed tuples */
-		Assert(presult.all_visible_except_removable);
-
-		/*
-		 * We have no freeze plans to execute, so there's no added cost from
-		 * following the freeze path.  That's why it was chosen. This is
-		 * important in the case where the page only contains totally frozen
-		 * tuples at this point (perhaps only following pruning). Such pages
-		 * can be marked all-frozen in the VM by our caller, even though none
-		 * of its tuples were newly frozen here (note that the "no freeze"
-		 * path never sets pages all-frozen).
-		 *
-		 * We never increment the frozen_pages instrumentation counter here,
-		 * since it only counts pages with newly frozen tuples (don't confuse
-		 * that with pages newly set all-frozen in VM).
-		 */
-		vacrel->NewRelfrozenXid = pagefrz.FreezePageRelfrozenXid;
-		vacrel->NewRelminMxid = pagefrz.FreezePageRelminMxid;
-	}
-	else
-	{
-		/*
-		 * Page requires "no freeze" processing.  It might be set all-visible
-		 * in the visibility map, but it can never be set all-frozen.
-		 */
-		vacrel->NewRelfrozenXid = pagefrz.NoFreezePageRelfrozenXid;
-		vacrel->NewRelminMxid = pagefrz.NoFreezePageRelminMxid;
-		presult.all_frozen = false;
-		presult.nfrozen = 0;	/* avoid miscounts in instrumentation */
 	}
 
 	/*
@@ -1676,7 +1608,7 @@ lazy_scan_prune(LVRelState *vacrel,
 		Assert(presult.all_frozen == debug_all_frozen);
 
 		Assert(!TransactionIdIsValid(debug_cutoff) ||
-			   debug_cutoff == presult.visibility_cutoff_xid);
+			   debug_cutoff == presult.vm_conflict_horizon);
 	}
 #endif
 
@@ -1730,7 +1662,7 @@ lazy_scan_prune(LVRelState *vacrel,
 
 		if (presult.all_frozen)
 		{
-			Assert(!TransactionIdIsValid(presult.visibility_cutoff_xid));
+			Assert(!TransactionIdIsValid(presult.vm_conflict_horizon));
 			flags |= VISIBILITYMAP_ALL_FROZEN;
 		}
 
@@ -1750,7 +1682,7 @@ lazy_scan_prune(LVRelState *vacrel,
 		PageSetAllVisible(page);
 		MarkBufferDirty(buf);
 		visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
-						  vmbuffer, presult.visibility_cutoff_xid,
+						  vmbuffer, presult.vm_conflict_horizon,
 						  flags);
 	}
 
@@ -1815,11 +1747,11 @@ lazy_scan_prune(LVRelState *vacrel,
 		/*
 		 * Set the page all-frozen (and all-visible) in the VM.
 		 *
-		 * We can pass InvalidTransactionId as our visibility_cutoff_xid,
-		 * since a snapshotConflictHorizon sufficient to make everything safe
-		 * for REDO was logged when the page's tuples were frozen.
+		 * We can pass InvalidTransactionId as our vm_conflict_horizon, since
+		 * a snapshotConflictHorizon sufficient to make everything safe for
+		 * REDO was logged when the page's tuples were frozen.
 		 */
-		Assert(!TransactionIdIsValid(presult.visibility_cutoff_xid));
+		Assert(!TransactionIdIsValid(presult.vm_conflict_horizon));
 		visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
 						  vmbuffer, InvalidTransactionId,
 						  VISIBILITYMAP_ALL_VISIBLE |
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index b3cd248fb64..88a6d504dff 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -1715,9 +1715,9 @@ TransactionIdIsActive(TransactionId xid)
  * Note: the approximate horizons (see definition of GlobalVisState) are
  * updated by the computations done here. That's currently required for
  * correctness and a small optimization. Without doing so it's possible that
- * heap vacuum's call to heap_page_prune() uses a more conservative horizon
- * than later when deciding which tuples can be removed - which the code
- * doesn't expect (breaking HOT).
+ * heap vacuum's call to heap_page_prune_and_freeze() uses a more conservative
+ * horizon than later when deciding which tuples can be removed - which the
+ * code doesn't expect (breaking HOT).
  */
 static void
 ComputeXidHorizons(ComputeXidHorizonsResult *h)
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 59c81f38e51..6f9c66a872b 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -195,13 +195,13 @@ typedef struct HeapPageFreeze
 /*
  * Per-page state returned from pruning
  */
-typedef struct PruneResult
+typedef struct PruneFreezeResult
 {
 	int			ndeleted;		/* Number of tuples deleted from the page */
 	int			nnewlpdead;		/* Number of newly LP_DEAD items */
 
 	/*
-	 * The rest of the fields in PruneResult are only guaranteed to be
+	 * The rest of the fields in PruneFreezeResult are only guaranteed to be
 	 * initialized if heap_page_prune is passed PruneReason VACUUM_SCAN.
 	 */
 
@@ -212,23 +212,22 @@ typedef struct PruneResult
 	 */
 	bool		all_visible;
 
-	/*
-	 * Whether or not the page is all-visible except for tuples which will be
-	 * removed during vacuum's second pass. This is used by VACUUM to
-	 * determine whether or not to consider opportunistically freezing the
-	 * page.
-	 */
-	bool		all_visible_except_removable;
-
 	/* Whether or not the page can be set all-frozen in the VM */
 	bool		all_frozen;
-	TransactionId visibility_cutoff_xid;	/* Newest xmin on the page */
+
+	/*
+	 * If the page is all-visible and not all-frozen, this is the oldest xid
+	 * that can see the page as all-visible. It is to be used as the snapshot
+	 * conflict horizon when emitting an XLOG_HEAP2_VISIBLE record.
+	 */
+	TransactionId vm_conflict_horizon;
 
 	/*
 	 * Tuple visibility is only computed once for each tuple, for correctness
-	 * and efficiency reasons; see comment in heap_page_prune() for details.
-	 * This is of type int8[], instead of HTSV_Result[], so we can use -1 to
-	 * indicate no visibility has been computed, e.g. for LP_DEAD items.
+	 * and efficiency reasons; see comment in heap_page_prune_and_freeze() for
+	 * details. This is of type int8[], instead of HTSV_Result[], so we can
+	 * use -1 to indicate no visibility has been computed, e.g. for LP_DEAD
+	 * items.
 	 *
 	 * This needs to be MaxHeapTuplesPerPage + 1 long as FirstOffsetNumber is
 	 * 1. Otherwise every access would need to subtract 1.
@@ -242,9 +241,14 @@ typedef struct PruneResult
 	 * One entry for every tuple that we may freeze.
 	 */
 	HeapTupleFreeze frozen[MaxHeapTuplesPerPage];
-} PruneResult;
+	/* New value of relfrozenxid found by heap_page_prune_and_freeze() */
+	TransactionId new_relfrozenxid;
 
-/* 'reason' codes for heap_page_prune() */
+	/* New value of relminmxid found by heap_page_prune_and_freeze() */
+	MultiXactId new_relminmxid;
+} PruneFreezeResult;
+
+/* 'reason' codes for heap_page_prune_and_freeze() */
 typedef enum
 {
 	PRUNE_ON_ACCESS,			/* on-access pruning */
@@ -254,7 +258,7 @@ typedef enum
 
 /*
  * Pruning calculates tuple visibility once and saves the results in an array
- * of int8. See PruneResult.htsv for details. This helper function is meant to
+ * of int8. See PruneFreezeResult.htsv for details. This helper function is meant to
  * guard against examining visibility status array members which have not yet
  * been computed.
  */
@@ -332,6 +336,7 @@ extern TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
 								 Buffer *buffer, struct TM_FailureData *tmfd);
 
 extern void heap_inplace_update(Relation relation, HeapTuple tuple);
+
 extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
 									  HeapPageFreeze *pagefrz,
 									  HeapTupleFreeze *frz, bool *totally_frozen);
@@ -358,13 +363,13 @@ extern TransactionId heap_index_delete_tuples(Relation rel,
 /* in heap/pruneheap.c */
 struct GlobalVisState;
 extern void heap_page_prune_opt(Relation relation, Buffer buffer);
-extern void heap_page_prune(Relation relation, Buffer buffer,
-							struct GlobalVisState *vistest,
-							bool mark_unused_now,
-							HeapPageFreeze *pagefrz,
-							PruneResult *presult,
-							PruneReason reason,
-							OffsetNumber *off_loc);
+extern void heap_page_prune_and_freeze(Relation relation, Buffer buffer,
+									   struct GlobalVisState *vistest,
+									   bool mark_unused_now,
+									   HeapPageFreeze *pagefrz,
+									   PruneFreezeResult *presult,
+									   PruneReason reason,
+									   OffsetNumber *off_loc);
 extern void heap_page_prune_execute(Buffer buffer, bool lp_truncate_only,
 									OffsetNumber *redirected, int nredirected,
 									OffsetNumber *nowdead, int ndead,
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 4679660837c..cc6a33ab3ee 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -2192,8 +2192,8 @@ ProjectionPath
 PromptInterruptContext
 ProtocolVersion
 PrsStorage
+PruneFreezeResult
 PruneReason
-PruneResult
 PruneState
 PruneStepResult
 PsqlScanCallbacks
-- 
2.40.1

