From 5d0ff10bab645988f228050f3cd3084163bcc2e2 Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplageman@gmail.com>
Date: Tue, 26 Mar 2024 09:10:14 -0400
Subject: [PATCH v7 11/16] Remove heap_freeze_execute_prepared()

In order to merge freeze and prune WAL records, the execution of tuple
freezing and the WAL logging of the changes to the page must be
separated so that the WAL logging can be combined with prune WAL
logging. This commit introduces a helper, heap_freeze_prepared_tuples(),
which executes the prepared freeze plans, and inlines the WAL logging
from heap_freeze_execute_prepared() at its call site in
heap_page_prune_and_freeze().
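
For illustration, the resulting call pattern looks roughly like the
minimal sketch below. The wrapping function and the conflict_horizon
parameter name are hypothetical; the heap_*, MarkBufferDirty(), and
log_heap_prune_and_freeze() calls follow the signatures touched by this
patch.

	/*
	 * Illustrative sketch only: a caller that executes prepared freeze
	 * plans and WAL-logs the change itself, roughly as
	 * heap_page_prune_and_freeze() now does.
	 */
	static void
	freeze_prepared_example(Relation rel, Buffer buffer,
							TransactionId conflict_horizon,
							HeapTupleFreeze *tuples, int ntuples)
	{
		/* XID sanity checks stay outside the critical section */
		heap_pre_freeze_checks(buffer, tuples, ntuples);

		START_CRIT_SECTION();

		/* Apply the prepared freeze plans to the page */
		heap_freeze_prepared_tuples(buffer, tuples, ntuples);

		MarkBufferDirty(buffer);

		/*
		 * WAL logging now happens at the call site, which is what
		 * allows it to be combined with prune WAL logging.
		 */
		if (RelationNeedsWAL(rel))
			log_heap_prune_and_freeze(rel, buffer, conflict_horizon,
									  false,	/* no cleanup lock required */
									  PRUNE_VACUUM_SCAN,
									  tuples, ntuples,
									  NULL, 0,	/* redirected */
									  NULL, 0,	/* dead */
									  NULL, 0); /* unused */

		END_CRIT_SECTION();
	}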
---
 src/backend/access/heap/heapam.c    | 49 +++++++----------------------
 src/backend/access/heap/pruneheap.c | 22 ++++++++++---
 src/include/access/heapam.h         | 28 +++++++++--------
 3 files changed, 44 insertions(+), 55 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index be48098f7f3..1c1785994b1 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6340,9 +6340,9 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
  * XIDs or MultiXactIds that will need to be processed by a future VACUUM.
  *
  * VACUUM caller must assemble HeapTupleFreeze freeze plan entries for every
- * tuple that we returned true for, and call heap_freeze_execute_prepared to
- * execute freezing.  Caller must initialize pagefrz fields for page as a
- * whole before first call here for each heap page.
+ * tuple that we returned true for, and then execute freezing.  Caller must
+ * initialize pagefrz fields for page as a whole before first call here for
+ * each heap page.
  *
  * VACUUM caller decides on whether or not to freeze the page as a whole.
  * We'll often prepare freeze plans for a page that caller just discards.
@@ -6657,8 +6657,8 @@ heap_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz)
 }
 
 /*
-* Perform xmin/xmax XID status sanity checks before calling
-* heap_freeze_execute_prepared().
+* Perform xmin/xmax XID status sanity checks before actually executing freeze
+* plans.
 *
 * heap_prepare_freeze_tuple doesn't perform these checks directly because
 * pg_xact lookups are relatively expensive.  They shouldn't be repeated
@@ -6711,30 +6711,17 @@ heap_pre_freeze_checks(Buffer buffer,
 }
 
 /*
- * heap_freeze_execute_prepared
- *
- * Executes freezing of one or more heap tuples on a page on behalf of caller.
- * Caller passes an array of tuple plans from heap_prepare_freeze_tuple.
- * Caller must set 'offset' in each plan for us.  Note that we destructively
- * sort caller's tuples array in-place, so caller had better be done with it.
- *
- * WAL-logs the changes so that VACUUM can advance the rel's relfrozenxid
- * later on without any risk of unsafe pg_xact lookups, even following a hard
- * crash (or when querying from a standby).  We represent freezing by setting
- * infomask bits in tuple headers, but this shouldn't be thought of as a hint.
- * See section on buffer access rules in src/backend/storage/buffer/README.
+ * Helper which executes freezing of one or more heap tuples on a page on
+ * behalf of caller.  Caller passes an array of tuple plans from
+ * heap_prepare_freeze_tuple.  Caller must set 'offset' in each plan for us.
+ * Must be called in a critical section that also marks the buffer dirty and,
+ * if needed, emits WAL.
  */
 void
-heap_freeze_execute_prepared(Relation rel, Buffer buffer,
-							 TransactionId snapshotConflictHorizon,
-							 HeapTupleFreeze *tuples, int ntuples)
+heap_freeze_prepared_tuples(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
 {
 	Page		page = BufferGetPage(buffer);
 
-	Assert(ntuples > 0);
-
-	START_CRIT_SECTION();
-
 	for (int i = 0; i < ntuples; i++)
 	{
 		HeapTupleFreeze *frz = tuples + i;
@@ -6746,20 +6733,6 @@ heap_freeze_execute_prepared(Relation rel, Buffer buffer,
 	}
 
 	MarkBufferDirty(buffer);
-
-	/* Now WAL-log freezing if necessary */
-	if (RelationNeedsWAL(rel))
-	{
-		log_heap_prune_and_freeze(rel, buffer, snapshotConflictHorizon,
-								  false,	/* no cleanup lock required */
-								  PRUNE_VACUUM_SCAN,
-								  tuples, ntuples,
-								  NULL, 0,	/* redirected */
-								  NULL, 0,	/* dead */
-								  NULL, 0); /* unused */
-	}
-
-	END_CRIT_SECTION();
 }
 
 /*
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index fe463ad7146..8914d4bf5c8 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -635,10 +635,24 @@ heap_page_prune_and_freeze(Relation relation, Buffer buffer,
 
 	if (do_freeze)
 	{
-		/* Execute all freeze plans for page as a single atomic action */
-		heap_freeze_execute_prepared(relation, buffer,
-									 frz_conflict_horizon,
-									 prstate.frozen, presult->nfrozen);
+		START_CRIT_SECTION();
+
+		Assert(presult->nfrozen > 0);
+
+		heap_freeze_prepared_tuples(buffer, prstate.frozen, presult->nfrozen);
+
+		MarkBufferDirty(buffer);
+
+		/* Now WAL-log freezing if necessary */
+		if (RelationNeedsWAL(relation))
+			log_heap_prune_and_freeze(relation, buffer,
+									  frz_conflict_horizon, false, reason,
+									  prstate.frozen, presult->nfrozen,
+									  NULL, 0,	/* redirected */
+									  NULL, 0,	/* dead */
+									  NULL, 0); /* unused */
+
+		END_CRIT_SECTION();
 	}
 	else if (!pagefrz || !presult->all_frozen || presult->nfrozen > 0)
 	{
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index dbf6323b5ff..ac0ef6e4281 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -14,6 +14,7 @@
 #ifndef HEAPAM_H
 #define HEAPAM_H
 
+#include "access/heapam_xlog.h"
 #include "access/relation.h"	/* for backward compatibility */
 #include "access/relscan.h"
 #include "access/sdir.h"
@@ -101,8 +102,8 @@ typedef enum
 } HTSV_Result;
 
 /*
- * heap_prepare_freeze_tuple may request that heap_freeze_execute_prepared
- * check any tuple's to-be-frozen xmin and/or xmax status using pg_xact
+ * heap_prepare_freeze_tuple may request that any tuple's to-be-frozen xmin
+ * and/or xmax status be checked using pg_xact during freezing execution.
  */
 #define		HEAP_FREEZE_CHECK_XMIN_COMMITTED	0x01
 #define		HEAP_FREEZE_CHECK_XMAX_ABORTED		0x02
@@ -154,14 +155,14 @@ typedef struct HeapPageFreeze
 	/*
 	 * "Freeze" NewRelfrozenXid/NewRelminMxid trackers.
 	 *
-	 * Trackers used when heap_freeze_execute_prepared freezes, or when there
-	 * are zero freeze plans for a page.  It is always valid for vacuumlazy.c
-	 * to freeze any page, by definition.  This even includes pages that have
-	 * no tuples with storage to consider in the first place.  That way the
-	 * 'totally_frozen' results from heap_prepare_freeze_tuple can always be
-	 * used in the same way, even when no freeze plans need to be executed to
-	 * "freeze the page".  Only the "freeze" path needs to consider the need
-	 * to set pages all-frozen in the visibility map under this scheme.
+	 * Trackers used when tuples will be frozen, or when there are zero freeze
+	 * plans for a page.  It is always valid for vacuumlazy.c to freeze any
+	 * page, by definition.  This even includes pages that have no tuples with
+	 * storage to consider in the first place.  That way the 'totally_frozen'
+	 * results from heap_prepare_freeze_tuple can always be used in the same
+	 * way, even when no freeze plans need to be executed to "freeze the
+	 * page". Only the "freeze" path needs to consider the need to set pages
+	 * all-frozen in the visibility map under this scheme.
 	 *
 	 * When we freeze a page, we generally freeze all XIDs < OldestXmin, only
 	 * leaving behind XIDs that are ineligible for freezing, if any.  And so
@@ -343,12 +344,13 @@ extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
 
 extern void heap_pre_freeze_checks(Buffer buffer,
 								   HeapTupleFreeze *tuples, int ntuples);
-extern void heap_freeze_execute_prepared(Relation rel, Buffer buffer,
-										 TransactionId snapshotConflictHorizon,
-										 HeapTupleFreeze *tuples, int ntuples);
+
+extern void heap_freeze_prepared_tuples(Buffer buffer,
+										HeapTupleFreeze *tuples, int ntuples);
 extern bool heap_freeze_tuple(HeapTupleHeader tuple,
 							  TransactionId relfrozenxid, TransactionId relminmxid,
 							  TransactionId FreezeLimit, TransactionId MultiXactCutoff);
+
 extern bool heap_tuple_should_freeze(HeapTupleHeader tuple,
 									 const struct VacuumCutoffs *cutoffs,
 									 TransactionId *NoFreezePageRelfrozenXid,
-- 
2.40.1

