From 2a2e5407f8e6f8200dbda1b95cd3ec8d379282dd Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplageman@gmail.com>
Date: Sun, 7 Jan 2024 17:03:17 -0500
Subject: [PATCH v4 12/19] Remove heap_freeze_execute_prepared()

In order to merge freeze and prune records, the execution of tuple
freezing and the WAL logging of the changes to the page must be
separated so that the WAL logging can be combined with prune WAL
logging. This commit makes a helper for the tuple freezing and then
inlines the contents of heap_freeze_execute_prepared() where it is
called in heap_page_prune().
---
 src/backend/access/heap/heapam.c    | 79 +++++------------------------
 src/backend/access/heap/pruneheap.c | 51 +++++++++++++++++--
 src/include/access/heapam.h         | 31 ++++++-----
 3 files changed, 77 insertions(+), 84 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 16e3f2520a4..e47b56e7856 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -91,9 +91,6 @@ static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
 static TM_Result heap_lock_updated_tuple(Relation rel, HeapTuple tuple,
 										 ItemPointer ctid, TransactionId xid,
 										 LockTupleMode mode);
-static int	heap_log_freeze_plan(HeapTupleFreeze *tuples, int ntuples,
-								 xl_heap_freeze_plan *plans_out,
-								 OffsetNumber *offsets_out);
 static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
 								   uint16 *new_infomask2);
 static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax,
@@ -6343,9 +6340,9 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
  * XIDs or MultiXactIds that will need to be processed by a future VACUUM.
  *
  * VACUUM caller must assemble HeapTupleFreeze freeze plan entries for every
- * tuple that we returned true for, and call heap_freeze_execute_prepared to
- * execute freezing.  Caller must initialize pagefrz fields for page as a
- * whole before first call here for each heap page.
+ * tuple that we returned true for, and then execute freezing.  Caller must
+ * initialize pagefrz fields for page as a whole before first call here for
+ * each heap page.
  *
  * VACUUM caller decides on whether or not to freeze the page as a whole.
  * We'll often prepare freeze plans for a page that caller just discards.
@@ -6659,8 +6656,8 @@ heap_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz)
 }
 
 /*
-* Perform xmin/xmax XID status sanity checks before calling
-* heap_freeze_execute_prepared().
+* Perform xmin/xmax XID status sanity checks before actually executing freeze
+* plans.
 *
 * heap_prepare_freeze_tuple doesn't perform these checks directly because
 * pg_xact lookups are relatively expensive.  They shouldn't be repeated
@@ -6713,30 +6710,17 @@ heap_pre_freeze_checks(Buffer buffer,
 }
 
 /*
- * heap_freeze_execute_prepared
- *
- * Executes freezing of one or more heap tuples on a page on behalf of caller.
- * Caller passes an array of tuple plans from heap_prepare_freeze_tuple.
- * Caller must set 'offset' in each plan for us.  Note that we destructively
- * sort caller's tuples array in-place, so caller had better be done with it.
- *
- * WAL-logs the changes so that VACUUM can advance the rel's relfrozenxid
- * later on without any risk of unsafe pg_xact lookups, even following a hard
- * crash (or when querying from a standby).  We represent freezing by setting
- * infomask bits in tuple headers, but this shouldn't be thought of as a hint.
- * See section on buffer access rules in src/backend/storage/buffer/README.
+ * Helper which executes freezing of one or more heap tuples on a page on
+ * behalf of caller.  Caller passes an array of tuple plans from
+ * heap_prepare_freeze_tuple.  Caller must set 'offset' in each plan for us.
+ * Must be called in a critical section that also marks the buffer dirty and,
+ * if needed, emits WAL.
  */
 void
-heap_freeze_execute_prepared(Relation rel, Buffer buffer,
-							 TransactionId snapshotConflictHorizon,
-							 HeapTupleFreeze *tuples, int ntuples)
+heap_freeze_prepared_tuples(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
 {
 	Page		page = BufferGetPage(buffer);
 
-	Assert(ntuples > 0);
-
-	START_CRIT_SECTION();
-
 	for (int i = 0; i < ntuples; i++)
 	{
 		HeapTupleFreeze *frz = tuples + i;
@@ -6746,45 +6730,6 @@ heap_freeze_execute_prepared(Relation rel, Buffer buffer,
 		htup = (HeapTupleHeader) PageGetItem(page, itemid);
 		heap_execute_freeze_tuple(htup, frz);
 	}
-
-	MarkBufferDirty(buffer);
-
-	/* Now WAL-log freezing if necessary */
-	if (RelationNeedsWAL(rel))
-	{
-		xl_heap_freeze_plan plans[MaxHeapTuplesPerPage];
-		OffsetNumber offsets[MaxHeapTuplesPerPage];
-		int			nplans;
-		xl_heap_freeze_page xlrec;
-		XLogRecPtr	recptr;
-
-		/* Prepare deduplicated representation for use in WAL record */
-		nplans = heap_log_freeze_plan(tuples, ntuples, plans, offsets);
-
-		xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
-		xlrec.isCatalogRel = RelationIsAccessibleInLogicalDecoding(rel);
-		xlrec.nplans = nplans;
-
-		XLogBeginInsert();
-		XLogRegisterData((char *) &xlrec, SizeOfHeapFreezePage);
-
-		/*
-		 * The freeze plan array and offset array are not actually in the
-		 * buffer, but pretend that they are.  When XLogInsert stores the
-		 * whole buffer, the arrays need not be stored too.
-		 */
-		XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
-		XLogRegisterBufData(0, (char *) plans,
-							nplans * sizeof(xl_heap_freeze_plan));
-		XLogRegisterBufData(0, (char *) offsets,
-							ntuples * sizeof(OffsetNumber));
-
-		recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_FREEZE_PAGE);
-
-		PageSetLSN(page, recptr);
-	}
-
-	END_CRIT_SECTION();
 }
 
 /*
@@ -6874,7 +6819,7 @@ heap_log_freeze_new_plan(xl_heap_freeze_plan *plan, HeapTupleFreeze *frz)
  * (actually there is one array per freeze plan, but that's not of immediate
  * concern to our caller).
  */
-static int
+int
 heap_log_freeze_plan(HeapTupleFreeze *tuples, int ntuples,
 					 xl_heap_freeze_plan *plans_out,
 					 OffsetNumber *offsets_out)
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 87f99497865..7bd479cfd4e 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -640,10 +640,53 @@ heap_page_prune_and_freeze(Relation relation, Buffer buffer,
 
 	if (do_freeze)
 	{
-		/* Execute all freeze plans for page as a single atomic action */
-		heap_freeze_execute_prepared(relation, buffer,
-									 presult->frz_conflict_horizon,
-									 prstate.frozen, presult->nfrozen);
+		START_CRIT_SECTION();
+
+		Assert(presult->nfrozen > 0);
+
+		heap_freeze_prepared_tuples(buffer, prstate.frozen, presult->nfrozen);
+
+		MarkBufferDirty(buffer);
+
+		/* Now WAL-log freezing if necessary */
+		if (RelationNeedsWAL(relation))
+		{
+			xl_heap_freeze_plan plans[MaxHeapTuplesPerPage];
+			OffsetNumber offsets[MaxHeapTuplesPerPage];
+			int			nplans;
+			xl_heap_freeze_page xlrec;
+			XLogRecPtr	recptr;
+
+			/*
+			 * Prepare deduplicated representation for use in WAL record.
+			 * Destructively sorts tuples array in-place.
+			 */
+			nplans = heap_log_freeze_plan(prstate.frozen, presult->nfrozen, plans, offsets);
+
+			xlrec.snapshotConflictHorizon = presult->frz_conflict_horizon;
+			xlrec.isCatalogRel = RelationIsAccessibleInLogicalDecoding(relation);
+			xlrec.nplans = nplans;
+
+			XLogBeginInsert();
+			XLogRegisterData((char *) &xlrec, SizeOfHeapFreezePage);
+
+			/*
+			 * The freeze plan array and offset array are not actually in the
+			 * buffer, but pretend that they are.  When XLogInsert stores the
+			 * whole buffer, the arrays need not be stored too.
+			 */
+			XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
+			XLogRegisterBufData(0, (char *) plans,
+								nplans * sizeof(xl_heap_freeze_plan));
+			XLogRegisterBufData(0, (char *) offsets,
+								presult->nfrozen * sizeof(OffsetNumber));
+
+			recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_FREEZE_PAGE);
+
+			PageSetLSN(page, recptr);
+		}
+
+		END_CRIT_SECTION();
 	}
 	else if (!pagefrz || !presult->all_frozen || presult->nfrozen > 0)
 	{
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 02e33f213e1..321a46185e1 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -14,6 +14,7 @@
 #ifndef HEAPAM_H
 #define HEAPAM_H
 
+#include "access/heapam_xlog.h"
 #include "access/relation.h"	/* for backward compatibility */
 #include "access/relscan.h"
 #include "access/sdir.h"
@@ -101,8 +102,8 @@ typedef enum
 } HTSV_Result;
 
 /*
- * heap_prepare_freeze_tuple may request that heap_freeze_execute_prepared
- * check any tuple's to-be-frozen xmin and/or xmax status using pg_xact
+ * heap_prepare_freeze_tuple may request that any tuple's to-be-frozen xmin
+ * and/or xmax status be checked using pg_xact during freezing execution.
  */
 #define		HEAP_FREEZE_CHECK_XMIN_COMMITTED	0x01
 #define		HEAP_FREEZE_CHECK_XMAX_ABORTED		0x02
@@ -154,14 +155,14 @@ typedef struct HeapPageFreeze
 	/*
 	 * "Freeze" NewRelfrozenXid/NewRelminMxid trackers.
 	 *
-	 * Trackers used when heap_freeze_execute_prepared freezes, or when there
-	 * are zero freeze plans for a page.  It is always valid for vacuumlazy.c
-	 * to freeze any page, by definition.  This even includes pages that have
-	 * no tuples with storage to consider in the first place.  That way the
-	 * 'totally_frozen' results from heap_prepare_freeze_tuple can always be
-	 * used in the same way, even when no freeze plans need to be executed to
-	 * "freeze the page".  Only the "freeze" path needs to consider the need
-	 * to set pages all-frozen in the visibility map under this scheme.
+	 * Trackers used when tuples will be frozen, or when there are zero freeze
+	 * plans for a page.  It is always valid for vacuumlazy.c to freeze any
+	 * page, by definition.  This even includes pages that have no tuples with
+	 * storage to consider in the first place.  That way the 'totally_frozen'
+	 * results from heap_prepare_freeze_tuple can always be used in the same
+	 * way, even when no freeze plans need to be executed to "freeze the
+	 * page".  Only the "freeze" path needs to consider the need to set pages
+	 * all-frozen in the visibility map under this scheme.
 	 *
 	 * When we freeze a page, we generally freeze all XIDs < OldestXmin, only
 	 * leaving behind XIDs that are ineligible for freezing, if any.  And so
@@ -315,12 +316,16 @@ extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
 
 extern void heap_pre_freeze_checks(Buffer buffer,
 								   HeapTupleFreeze *tuples, int ntuples);
-extern void heap_freeze_execute_prepared(Relation rel, Buffer buffer,
-										 TransactionId snapshotConflictHorizon,
-										 HeapTupleFreeze *tuples, int ntuples);
+
+extern void heap_freeze_prepared_tuples(Buffer buffer,
+										HeapTupleFreeze *tuples, int ntuples);
 extern bool heap_freeze_tuple(HeapTupleHeader tuple,
 							  TransactionId relfrozenxid, TransactionId relminmxid,
 							  TransactionId FreezeLimit, TransactionId MultiXactCutoff);
+
+extern int	heap_log_freeze_plan(HeapTupleFreeze *tuples, int ntuples,
+								 xl_heap_freeze_plan *plans_out,
+								 OffsetNumber *offsets_out);
 extern bool heap_tuple_should_freeze(HeapTupleHeader tuple,
 									 const struct VacuumCutoffs *cutoffs,
 									 TransactionId *NoFreezePageRelfrozenXid,
-- 
2.40.1

