From 5871dbff01dc89c0a1dfc09db341fff8314451c0 Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplageman@gmail.com>
Date: Tue, 12 Mar 2024 19:07:38 -0400
Subject: [PATCH v3 16/17] Obsolete XLOG_HEAP2_FREEZE_PAGE

When vacuum freezes tuples, the information needed to replay these
changes is saved in the xl_heap_prune record. As such, we no longer need
to create new xl_heap_freeze records. We can get rid of
heap_freeze_execute_prepared() as well as the special case in
heap_page_prune_and_freeze() for when only freezing is done.

We must retain the xl_heap_freeze_page record and
heap_xlog_freeze_page() in order to replay old freeze records.
---
 src/backend/access/heap/heapam.c    |  78 ++-----------
 src/backend/access/heap/pruneheap.c | 163 ++++++++++++++--------------
 src/include/access/heapam.h         |   7 +-
 3 files changed, 88 insertions(+), 160 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index a8f35eba3c9..12a1a7805f4 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6340,7 +6340,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
  * XIDs or MultiXactIds that will need to be processed by a future VACUUM.
  *
  * VACUUM caller must assemble HeapTupleFreeze freeze plan entries for every
- * tuple that we returned true for, and call heap_freeze_execute_prepared to
+ * tuple that we returned true for, and call heap_page_prune_and_freeze() to
  * execute freezing.  Caller must initialize pagefrz fields for page as a
  * whole before first call here for each heap page.
  *
@@ -6656,8 +6656,7 @@ heap_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz)
 }
 
 /*
-* Perform xmin/xmax XID status sanity checks before calling
-* heap_freeze_execute_prepared().
+* Perform xmin/xmax XID status sanity checks before executing freezing.
 *
 * heap_prepare_freeze_tuple doesn't perform these checks directly because
 * pg_xact lookups are relatively expensive.  They shouldn't be repeated
@@ -6732,70 +6731,6 @@ heap_freeze_prepared_tuples(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
 	}
 }
 
-/*
- * heap_freeze_execute_prepared
- *
- * Execute freezing of prepared tuples and WAL-logs the changes so that VACUUM
- * can advance the rel's relfrozenxid later on without any risk of unsafe
- * pg_xact lookups, even following a hard crash (or when querying from a
- * standby).  We represent freezing by setting infomask bits in tuple headers,
- * but this shouldn't be thought of as a hint. See section on buffer access
- * rules in src/backend/storage/buffer/README. Must be called from within a
- * critical section.
- */
-void
-heap_freeze_execute_prepared(Relation rel, Buffer buffer,
-							 TransactionId snapshotConflictHorizon,
-							 HeapTupleFreeze *tuples, int ntuples)
-{
-	Page		page = BufferGetPage(buffer);
-
-	Assert(ntuples > 0);
-
-	heap_freeze_prepared_tuples(buffer, tuples, ntuples);
-
-	MarkBufferDirty(buffer);
-
-	/* Now WAL-log freezing if necessary */
-	if (RelationNeedsWAL(rel))
-	{
-		xl_heap_freeze_plan plans[MaxHeapTuplesPerPage];
-		OffsetNumber offsets[MaxHeapTuplesPerPage];
-		int			nplans;
-		xl_heap_freeze_page xlrec;
-		XLogRecPtr	recptr;
-
-		/*
-		 * Prepare deduplicated representation for use in WAL record
-		 * Destructively sorts tuples array in-place, so caller had better be
-		 * done with it.
-		 */
-		nplans = heap_log_freeze_plan(tuples, ntuples, plans, offsets);
-
-		xlrec.snapshotConflictHorizon = snapshotConflictHorizon;
-		xlrec.isCatalogRel = RelationIsAccessibleInLogicalDecoding(rel);
-		xlrec.nplans = nplans;
-
-		XLogBeginInsert();
-		XLogRegisterData((char *) &xlrec, SizeOfHeapFreezePage);
-
-		/*
-		 * The freeze plan array and offset array are not actually in the
-		 * buffer, but pretend that they are.  When XLogInsert stores the
-		 * whole buffer, the arrays need not be stored too.
-		 */
-		XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
-		XLogRegisterBufData(0, (char *) plans,
-							nplans * sizeof(xl_heap_freeze_plan));
-		XLogRegisterBufData(0, (char *) offsets,
-							ntuples * sizeof(OffsetNumber));
-
-		recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_FREEZE_PAGE);
-
-		PageSetLSN(page, recptr);
-	}
-}
-
 /*
  * Comparator used to deduplicate XLOG_HEAP2_FREEZE_PAGE freeze plans
  */
@@ -8827,10 +8762,11 @@ heap_xlog_prune(XLogReaderState *record)
 		frz_offsets = nowunused + nunused;
 
 		/* Update all line pointers per the record, and repair fragmentation */
-		heap_page_prune_execute(buffer,
-								redirected, nredirected,
-								nowdead, ndead,
-								nowunused, nunused);
+		if (nredirected > 0 || ndead > 0 || nunused > 0)
+			heap_page_prune_execute(buffer,
+									redirected, nredirected,
+									nowdead, ndead,
+									nowunused, nunused);
 
 		for (int p = 0; p < nplans; p++)
 		{
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index fa628e410e6..6f45e5c37f0 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -270,6 +270,7 @@ heap_page_prune_and_freeze(Relation relation, Buffer buffer,
 	HeapTupleData tup;
 	bool		do_freeze;
 	bool		do_prune;
+	bool		do_hint;
 	bool		whole_page_freezable;
 	bool		hint_bit_fpi;
 	bool		prune_fpi = false;
@@ -583,6 +584,9 @@ heap_page_prune_and_freeze(Relation relation, Buffer buffer,
 		prstate.ndead > 0 ||
 		prstate.nunused > 0;
 
+	/* Record number of newly-set-LP_DEAD items for caller */
+	presult->nnewlpdead = prstate.ndead;
+
 	/*
 	 * Only incur overhead of checking if we will do an FPI if we might use
 	 * the information.
@@ -590,7 +594,15 @@ heap_page_prune_and_freeze(Relation relation, Buffer buffer,
 	if (do_prune && pagefrz)
 		prune_fpi = XLogCheckBufferNeedsBackup(buffer);
 
-	/* Is the whole page freezable? And is there something to freeze */
+	/*
+	 * Even if we don't prune anything, if we found a new value for the
+	 * pd_prune_xid field or the page was marked full, we will update the hint
+	 * bit.
+	 */
+	do_hint = ((PageHeader) page)->pd_prune_xid != prstate.new_prune_xid ||
+		PageIsFull(page);
+
+	/* Is the whole page freezable? And is there something to freeze? */
 	whole_page_freezable = presult->all_visible_except_removable &&
 		presult->all_frozen;
 
@@ -605,55 +617,63 @@ heap_page_prune_and_freeze(Relation relation, Buffer buffer,
 	 * opportunistic freeze heuristic must be improved; however, for now, try
 	 * to approximate it.
 	 */
-
 	do_freeze = pagefrz &&
 		(pagefrz->freeze_required ||
 		 (whole_page_freezable && presult->nfrozen > 0 && (prune_fpi || hint_bit_fpi)));
 
-	if (do_freeze)
-	{
+	/*
+	 * Validate the tuples we are considering freezing. We do this even if
+	 * pruning and hint bit setting have not emitted an FPI so far because we
+	 * still may emit an FPI while setting the page hint bit later. But we
+	 * want to avoid doing the pre-freeze checks in a critical section.
+	 */
+	if (pagefrz &&
+		(pagefrz->freeze_required ||
+		 (whole_page_freezable && presult->nfrozen > 0)))
 		heap_pre_freeze_checks(buffer, frozen, presult->nfrozen);
-		frz_conflict_horizon = heap_frz_conflict_horizon(presult, pagefrz);
-	}
-	else if (!pagefrz || !presult->all_frozen || presult->nfrozen > 0)
-	{
-		/*
-		 * If we will neither freeze tuples on the page nor set the page all
-		 * frozen in the visibility map, the page is not all frozen and there
-		 * will be no newly frozen tuples.
-		 */
-		presult->all_frozen = false;
-		presult->nfrozen = 0;	/* avoid miscounts in instrumentation */
-	}
 
-	/* Record number of newly-set-LP_DEAD items for caller */
-	presult->nnewlpdead = prstate.ndead;
+	/*
+	 * If we are going to modify the page contents anyway, we will have to
+	 * update more than hint bits.
+	 */
+	if (do_freeze || do_prune)
+		do_hint = false;
 
+	START_CRIT_SECTION();
 
-	/* Have we found any prunable items? */
-	if (!do_prune)
-	{
-		/* Any error while applying the changes is critical */
-		START_CRIT_SECTION();
+	/*
+	 * Update the page's pd_prune_xid field to either zero, or the lowest XID
+	 * of any soon-prunable tuple.
+	 */
+	((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
 
-		/*
-		 * If we didn't prune anything, but have found a new value for the
-		 * pd_prune_xid field, update it and mark the buffer dirty. This is
-		 * treated as a non-WAL-logged hint.
-		 *
-		 * Also clear the "page is full" flag if it is set, since there's no
-		 * point in repeating the prune/defrag process until something else
-		 * happens to the page.
-		 */
-		if (((PageHeader) page)->pd_prune_xid != prstate.new_prune_xid ||
-			PageIsFull(page))
-		{
-			((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
-			PageClearFull(page);
-			MarkBufferDirtyHint(buffer, true);
-		}
+	/*
+	 * If pruning, freezing, or updating the hint bit, clear the "page is
+	 * full" flag if it is set since there's no point in repeating the
+	 * prune/defrag process until something else happens to the page.
+	 */
+	if (do_prune || do_freeze || do_hint)
+		PageClearFull(page);
 
-		hint_bit_fpi = fpi_before != pgWalUsage.wal_fpi;
+	/*
+	 * Apply the planned item changes, then repair page fragmentation, and
+	 * update the page's hint bit about whether it has free line pointers.
+	 */
+	if (do_prune)
+	{
+		heap_page_prune_execute(buffer,
+								prstate.redirected, prstate.nredirected,
+								prstate.nowdead, prstate.ndead,
+								prstate.nowunused, prstate.nunused);
+	}
+
+	/*
+	 * If we aren't pruning or freezing anything, but we updated pd_prune_xid,
+	 * this is a non-WAL-logged hint.
+	 */
+	if (do_hint)
+	{
+		MarkBufferDirtyHint(buffer, true);
 
 		/*
 		 * We may have decided not to opportunistically freeze above because
@@ -661,60 +681,37 @@ heap_page_prune_and_freeze(Relation relation, Buffer buffer,
 		 * enabled, setting the hint bit may have emitted an FPI. Check again
 		 * if we should freeze.
 		 */
-		if (!do_freeze && hint_bit_fpi)
+		hint_bit_fpi = fpi_before != pgWalUsage.wal_fpi;
+
+		if (hint_bit_fpi)
 			do_freeze = pagefrz &&
 				(pagefrz->freeze_required ||
 				 (whole_page_freezable && presult->nfrozen > 0));
-
-		if (do_freeze)
-		{
-			heap_freeze_execute_prepared(relation, buffer,
-										 frz_conflict_horizon,
-										 frozen, presult->nfrozen);
-		}
-		else if (!pagefrz || !presult->all_frozen || presult->nfrozen > 0)
-		{
-			presult->all_frozen = false;
-			presult->nfrozen = 0;
-		}
-
-		END_CRIT_SECTION();
-
-		goto update_frozenxids;
 	}
 
-	START_CRIT_SECTION();
-
-	/*
-	 * Apply the planned item changes, then repair page fragmentation, and
-	 * update the page's hint bit about whether it has free line pointers.
-	 */
-	heap_page_prune_execute(buffer,
-							prstate.redirected, prstate.nredirected,
-							prstate.nowdead, prstate.ndead,
-							prstate.nowunused, prstate.nunused);
-
-	/*
-	 * Update the page's pd_prune_xid field to either zero, or the lowest XID
-	 * of any soon-prunable tuple.
-	 */
-	((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
-
-	/*
-	 * Also clear the "page is full" flag, since there's no point in repeating
-	 * the prune/defrag process until something else happens to the page.
-	 */
-	PageClearFull(page);
-
 	if (do_freeze)
+	{
+		frz_conflict_horizon = heap_frz_conflict_horizon(presult, pagefrz);
 		heap_freeze_prepared_tuples(buffer, frozen, presult->nfrozen);
+	}
+	else if ((!pagefrz || !presult->all_frozen || presult->nfrozen > 0))
+	{
+		/*
+		 * If we will neither freeze tuples on the page nor set the page all
+		 * frozen in the visibility map, the page is not all-frozen and there
+		 * will be no newly frozen tuples.
+		 */
+		presult->all_frozen = false;
+		presult->nfrozen = 0;	/* avoid miscounts in instrumentation */
+	}
 
-	MarkBufferDirty(buffer);
+	if (do_prune || do_freeze)
+		MarkBufferDirty(buffer);
 
 	/*
 	 * Emit a WAL XLOG_HEAP2_PRUNE record showing what we did
 	 */
-	if (RelationNeedsWAL(relation))
+	if ((do_prune || do_freeze) && RelationNeedsWAL(relation))
 	{
 		xl_heap_prune xlrec;
 		XLogRecPtr	recptr;
@@ -789,8 +786,6 @@ heap_page_prune_and_freeze(Relation relation, Buffer buffer,
 
 	END_CRIT_SECTION();
 
-update_frozenxids:
-
 	/* Caller won't update new_relfrozenxid and new_relminmxid */
 	if (!pagefrz)
 		return;
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index cc3071644c3..c36623f53bd 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -102,7 +102,7 @@ typedef enum
 } HTSV_Result;
 
 /*
- * heap_prepare_freeze_tuple may request that heap_freeze_execute_prepared
+ * heap_prepare_freeze_tuple may request that heap_page_prune_and_freeze()
  * check any tuple's to-be-frozen xmin and/or xmax status using pg_xact
  */
 #define		HEAP_FREEZE_CHECK_XMIN_COMMITTED	0x01
@@ -155,7 +155,7 @@ typedef struct HeapPageFreeze
 	/*
 	 * "Freeze" NewRelfrozenXid/NewRelminMxid trackers.
 	 *
-	 * Trackers used when heap_freeze_execute_prepared freezes, or when there
+	 * Trackers used when heap_page_prune_and_freeze() freezes, or when there
 	 * are zero freeze plans for a page.  It is always valid for vacuumlazy.c
 	 * to freeze any page, by definition.  This even includes pages that have
 	 * no tuples with storage to consider in the first place.  That way the
@@ -298,9 +298,6 @@ extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
 
 extern void heap_pre_freeze_checks(Buffer buffer,
 								   HeapTupleFreeze *tuples, int ntuples);
-extern void heap_freeze_execute_prepared(Relation rel, Buffer buffer,
-										 TransactionId snapshotConflictHorizon,
-										 HeapTupleFreeze *tuples, int ntuples);
 
 extern void heap_freeze_prepared_tuples(Buffer buffer,
 										HeapTupleFreeze *tuples, int ntuples);
-- 
2.40.1

