From 7e9ca57da9c80918c3b4c391874869cca8939456 Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplageman@gmail.com>
Date: Sat, 6 Jan 2024 16:22:17 -0500
Subject: [PATCH v1 04/15] Add reference to VacuumCutoffs in HeapPageFreeze

Future commits will move opportunistic freezing into the main path of
pruning in heap_page_prune(). Because on-access pruning will not do
opportunistic freezing, it is cleaner to keep the visibility information
required for calling heap_prepare_freeze_tuple() inside the
HeapPageFreeze structure itself, by saving a reference to the
VacuumCutoffs there.
---
 src/backend/access/heap/heapam.c     | 32 ++++++++++++++--------------
 src/backend/access/heap/vacuumlazy.c |  3 ++-
 src/include/access/heapam.h          |  2 +-
 3 files changed, 19 insertions(+), 18 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 707460a5364..76eb67f746a 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6377,7 +6377,6 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
  */
 bool
 heap_prepare_freeze_tuple(HeapTupleHeader tuple,
-						  const struct VacuumCutoffs *cutoffs,
 						  HeapPageFreeze *pagefrz,
 						  HeapTupleFreeze *frz, bool *totally_frozen)
 {
@@ -6405,14 +6404,14 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple,
 		xmin_already_frozen = true;
 	else
 	{
-		if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
+		if (TransactionIdPrecedes(xid, pagefrz->cutoffs->relfrozenxid))
 			ereport(ERROR,
 					(errcode(ERRCODE_DATA_CORRUPTED),
 					 errmsg_internal("found xmin %u from before relfrozenxid %u",
-									 xid, cutoffs->relfrozenxid)));
+									 xid, pagefrz->cutoffs->relfrozenxid)));
 
 		/* Will set freeze_xmin flags in freeze plan below */
-		freeze_xmin = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
+		freeze_xmin = TransactionIdPrecedes(xid, pagefrz->cutoffs->OldestXmin);
 
 		/* Verify that xmin committed if and when freeze plan is executed */
 		if (freeze_xmin)
@@ -6426,8 +6425,8 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple,
 	xid = HeapTupleHeaderGetXvac(tuple);
 	if (TransactionIdIsNormal(xid))
 	{
-		Assert(TransactionIdPrecedesOrEquals(cutoffs->relfrozenxid, xid));
-		Assert(TransactionIdPrecedes(xid, cutoffs->OldestXmin));
+		Assert(TransactionIdPrecedesOrEquals(pagefrz->cutoffs->relfrozenxid, xid));
+		Assert(TransactionIdPrecedes(xid, pagefrz->cutoffs->OldestXmin));
 
 		/*
 		 * For Xvac, we always freeze proactively.  This allows totally_frozen
@@ -6452,7 +6451,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple,
 		 * perform no-op xmax processing.  The only constraint is that the
 		 * FreezeLimit/MultiXactCutoff postcondition must never be violated.
 		 */
-		newxmax = FreezeMultiXactId(xid, tuple->t_infomask, cutoffs,
+		newxmax = FreezeMultiXactId(xid, tuple->t_infomask, pagefrz->cutoffs,
 									&flags, pagefrz);
 
 		if (flags & FRM_NOOP)
@@ -6476,7 +6475,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple,
 			 * (This repeats work from FreezeMultiXactId, but allows "no
 			 * freeze" tracker maintenance to happen in only one place.)
 			 */
-			Assert(!MultiXactIdPrecedes(newxmax, cutoffs->MultiXactCutoff));
+			Assert(!MultiXactIdPrecedes(newxmax, pagefrz->cutoffs->MultiXactCutoff));
 			Assert(MultiXactIdIsValid(newxmax) && xid == newxmax);
 		}
 		else if (flags & FRM_RETURN_IS_XID)
@@ -6485,7 +6484,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple,
 			 * xmax will become an updater Xid (original MultiXact's updater
 			 * member Xid will be carried forward as a simple Xid in Xmax).
 			 */
-			Assert(!TransactionIdPrecedes(newxmax, cutoffs->OldestXmin));
+			Assert(!TransactionIdPrecedes(newxmax, pagefrz->cutoffs->OldestXmin));
 
 			/*
 			 * NB -- some of these transformations are only valid because we
@@ -6509,7 +6508,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple,
 			 * xmax is an old MultiXactId that we have to replace with a new
 			 * MultiXactId, to carry forward two or more original member XIDs.
 			 */
-			Assert(!MultiXactIdPrecedes(newxmax, cutoffs->OldestMxact));
+			Assert(!MultiXactIdPrecedes(newxmax, pagefrz->cutoffs->OldestMxact));
 
 			/*
 			 * We can't use GetMultiXactIdHintBits directly on the new multi
@@ -6544,14 +6543,14 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple,
 	else if (TransactionIdIsNormal(xid))
 	{
 		/* Raw xmax is normal XID */
-		if (TransactionIdPrecedes(xid, cutoffs->relfrozenxid))
+		if (TransactionIdPrecedes(xid, pagefrz->cutoffs->relfrozenxid))
 			ereport(ERROR,
 					(errcode(ERRCODE_DATA_CORRUPTED),
 					 errmsg_internal("found xmax %u from before relfrozenxid %u",
-									 xid, cutoffs->relfrozenxid)));
+									 xid, pagefrz->cutoffs->relfrozenxid)));
 
 		/* Will set freeze_xmax flags in freeze plan below */
-		freeze_xmax = TransactionIdPrecedes(xid, cutoffs->OldestXmin);
+		freeze_xmax = TransactionIdPrecedes(xid, pagefrz->cutoffs->OldestXmin);
 
 		/*
 		 * Verify that xmax aborted if and when freeze plan is executed,
@@ -6631,7 +6630,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple,
 		 * Does this tuple force caller to freeze the entire page?
 		 */
 		pagefrz->freeze_required =
-			heap_tuple_should_freeze(tuple, cutoffs,
+			heap_tuple_should_freeze(tuple, pagefrz->cutoffs,
 									 &pagefrz->NoFreezePageRelfrozenXid,
 									 &pagefrz->NoFreezePageRelminMxid);
 	}
@@ -6953,8 +6952,9 @@ heap_freeze_tuple(HeapTupleHeader tuple,
 	pagefrz.NoFreezePageRelfrozenXid = FreezeLimit;
 	pagefrz.NoFreezePageRelminMxid = MultiXactCutoff;
 
-	do_freeze = heap_prepare_freeze_tuple(tuple, &cutoffs,
-										  &pagefrz, &frz, &totally_frozen);
+	pagefrz.cutoffs = &cutoffs;
+
+	do_freeze = heap_prepare_freeze_tuple(tuple, &pagefrz, &frz, &totally_frozen);
 
 	/*
 	 * Note that because this is not a WAL-logged operation, we don't need to
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 94c4a4cf1da..8651040f8de 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -1413,6 +1413,7 @@ lazy_scan_prune(LVRelState *vacrel,
 	pagefrz.FreezePageRelminMxid = vacrel->NewRelminMxid;
 	pagefrz.NoFreezePageRelfrozenXid = vacrel->NewRelfrozenXid;
 	pagefrz.NoFreezePageRelminMxid = vacrel->NewRelminMxid;
+	pagefrz.cutoffs = &vacrel->cutoffs;
 	tuples_frozen = 0;
 	lpdead_items = 0;
 	live_tuples = 0;
@@ -1558,7 +1559,7 @@ lazy_scan_prune(LVRelState *vacrel,
 		hastup = true;			/* page makes rel truncation unsafe */
 
 		/* Tuple with storage -- consider need to freeze */
-		if (heap_prepare_freeze_tuple(htup, &vacrel->cutoffs, &pagefrz,
+		if (heap_prepare_freeze_tuple(htup, &pagefrz,
 									  &frozen[tuples_frozen], &totally_frozen))
 		{
 			/* Save prepared freeze plan for later */
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index 4cfaf9ea46c..6823ab8b658 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -189,6 +189,7 @@ typedef struct HeapPageFreeze
 	TransactionId NoFreezePageRelfrozenXid;
 	MultiXactId NoFreezePageRelminMxid;
 
+	struct VacuumCutoffs *cutoffs;
 } HeapPageFreeze;
 
 /*
@@ -295,7 +296,6 @@ extern TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
 
 extern void heap_inplace_update(Relation relation, HeapTuple tuple);
 extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
-									  const struct VacuumCutoffs *cutoffs,
 									  HeapPageFreeze *pagefrz,
 									  HeapTupleFreeze *frz, bool *totally_frozen);
 extern void heap_freeze_execute_prepared(Relation rel, Buffer buffer,
-- 
2.37.2

