From d81331eb950675fab09ab2cbc6598861bcbf4c84 Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplageman@gmail.com>
Date: Wed, 13 Mar 2024 00:28:57 -0400
Subject: [PATCH v2 17/17] Streamline XLOG_HEAP2_PRUNE record

The xl_heap_prune struct for the XLOG_HEAP2_PRUNE record type had
members counting the number of freeze plans and the numbers of
redirected, dead, and newly unused line pointers. However, many
XLOG_HEAP2_PRUNE records use only some of those counts. As part of a
refactor to emit XLOG_HEAP2_PRUNE records instead of
XLOG_HEAP2_FREEZE_PAGE records when only freezing is done, eliminate
those members and instead use flags indicating which kinds of
modifications the record describes. The resulting record contains only
the data for modifications that must actually be performed.
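
For reference, a rough sketch of the new record layout (names as defined
in heapam_xlog.h by this patch; each sub-record is present only when its
flag is set, and the frozen-tuple offsets come last):

    main data:   xl_heap_prune { snapshotConflictHorizon, flags }

    block 0 data, in flag order:
      XLHP_HAS_FREEZE_PLANS      xlhp_freeze      { nplans, plans[nplans] }
      XLHP_HAS_REDIRECTIONS      xlhp_prune_items { ntargets, data[2 * ntargets] }
      XLHP_HAS_DEAD_ITEMS        xlhp_prune_items { ntargets, data[ntargets] }
      XLHP_HAS_NOW_UNUSED_ITEMS  xlhp_prune_items { ntargets, data[ntargets] }
      XLHP_HAS_FREEZE_PLANS      OffsetNumber frz_offsets[] (one per frozen tuple)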

ci-os-only:
---
 src/backend/access/heap/heapam.c       | 101 ++++++++++++++-----
 src/backend/access/heap/pruneheap.c    |  86 ++++++++++++----
 src/backend/access/rmgrdesc/heapdesc.c | 130 +++++++++++++++++++------
 src/include/access/heapam_xlog.h       | 122 ++++++++++++++---------
 src/tools/pgindent/typedefs.list       |   2 +
 5 files changed, 318 insertions(+), 123 deletions(-)

diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 12a1a7805f4..258f58b53e0 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8705,8 +8705,6 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_required,
 
 /*
  * Handles XLOG_HEAP2_PRUNE record type.
- *
- * Acquires a full cleanup lock.
  */
 static void
 heap_xlog_prune(XLogReaderState *record)
@@ -8717,49 +8715,101 @@ heap_xlog_prune(XLogReaderState *record)
 	RelFileLocator rlocator;
 	BlockNumber blkno;
 	XLogRedoAction action;
+	bool		get_cleanup_lock;
 
 	XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
 
+	/*
+	 * If there are dead, redirected, or unused items,
+	 * heap_page_prune_execute() will call PageRepairFragmentation() which
+	 * expects a full cleanup lock.
+	 */
+	get_cleanup_lock = xlrec->flags & XLHP_HAS_REDIRECTIONS ||
+		xlrec->flags & XLHP_HAS_DEAD_ITEMS ||
+		xlrec->flags & XLHP_HAS_NOW_UNUSED_ITEMS;
+
 	/*
 	 * We're about to remove tuples. In Hot Standby mode, ensure that there's
 	 * no queries running for which the removed tuples are still visible.
 	 */
 	if (InHotStandby)
 		ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
-											xlrec->isCatalogRel,
+											xlrec->flags & XLHP_IS_CATALOG_REL,
 											rlocator);
 
 	/*
-	 * If we have a full-page image, restore it (using a cleanup lock) and
-	 * we're done.
+	 * If we have a full-page image, restore it and we're done.
 	 */
-	action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true,
-										   &buffer);
+	action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL,
+										   get_cleanup_lock, &buffer);
+
 	if (action == BLK_NEEDS_REDO)
 	{
 		Page		page = (Page) BufferGetPage(buffer);
-		OffsetNumber *redirected;
-		OffsetNumber *nowdead;
-		OffsetNumber *nowunused;
-		int			nredirected;
-		int			ndead;
-		int			nunused;
-		int			nplans;
 		Size		datalen;
-		xl_heap_freeze_plan *plans;
+		OffsetNumber *redirected = NULL;
+		OffsetNumber *nowdead = NULL;
+		OffsetNumber *nowunused = NULL;
+		int			nredirected = 0;
+		int			ndead = 0;
+		int			nunused = 0;
+		int			nplans = 0;
 		OffsetNumber *frz_offsets;
+		xl_heap_freeze_plan *plans;
 		int			curoff = 0;
 
-		nplans = xlrec->nplans;
-		nredirected = xlrec->nredirected;
-		ndead = xlrec->ndead;
-		nunused = xlrec->nunused;
+		char	   *cursor = XLogRecGetBlockData(record, 0, &datalen);
+
+		if (xlrec->flags & XLHP_HAS_FREEZE_PLANS)
+		{
+			xlhp_freeze *freeze = (xlhp_freeze *) cursor;
+
+			nplans = freeze->nplans;
+			Assert(nplans > 0);
+			plans = freeze->plans;
+
+			cursor += offsetof(xlhp_freeze, plans);
+			cursor += sizeof(xl_heap_freeze_plan) * freeze->nplans;
+		}
 
-		plans = (xl_heap_freeze_plan *) XLogRecGetBlockData(record, 0, &datalen);
-		redirected = (OffsetNumber *) &plans[nplans];
-		nowdead = redirected + (nredirected * 2);
-		nowunused = nowdead + ndead;
-		frz_offsets = nowunused + nunused;
+		if (xlrec->flags & XLHP_HAS_REDIRECTIONS)
+		{
+			xlhp_prune_items *subrecord = (xlhp_prune_items *) cursor;
+
+			nredirected = subrecord->ntargets;
+			Assert(nredirected > 0);
+			redirected = &subrecord->data[0];
+
+			cursor += offsetof(xlhp_prune_items, data);
+			cursor += sizeof(OffsetNumber[2]) * nredirected;
+		}
+
+		if (xlrec->flags & XLHP_HAS_DEAD_ITEMS)
+		{
+			xlhp_prune_items *subrecord = (xlhp_prune_items *) cursor;
+
+			ndead = subrecord->ntargets;
+			Assert(ndead > 0);
+			nowdead = subrecord->data;
+
+			cursor += offsetof(xlhp_prune_items, data);
+			cursor += sizeof(OffsetNumber) * ndead;
+		}
+
+		if (xlrec->flags & XLHP_HAS_NOW_UNUSED_ITEMS)
+		{
+			xlhp_prune_items *subrecord = (xlhp_prune_items *) cursor;
+
+			nunused = subrecord->ntargets;
+			Assert(nunused > 0);
+			nowunused = subrecord->data;
+
+			cursor += offsetof(xlhp_prune_items, data);
+			cursor += sizeof(OffsetNumber) * nunused;
+		}
+
+		if (nplans > 0)
+			frz_offsets = (OffsetNumber *) cursor;
 
 		/* Update all line pointers per the record, and repair fragmentation */
 		if (nredirected > 0 || ndead > 0 || nunused > 0)
@@ -8798,7 +8848,6 @@ heap_xlog_prune(XLogReaderState *record)
 		 * Note: we don't worry about updating the page's prunability hints.
 		 * At worst this will cause an extra prune cycle to occur soon.
 		 */
-
 		PageSetLSN(page, lsn);
 		MarkBufferDirty(buffer);
 	}
@@ -8810,7 +8859,7 @@ heap_xlog_prune(XLogReaderState *record)
 		UnlockReleaseBuffer(buffer);
 
 		/*
-		 * After pruning records from a page, it's useful to update the FSM
+		 * After modifying records on a page, it's useful to update the FSM
 		 * about it, as it may cause the page become target for insertions
 		 * later even if vacuum decides not to visit it (which is possible if
 		 * gets marked all-visible.)
diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c
index 7a27c5a3957..06739f8ad49 100644
--- a/src/backend/access/heap/pruneheap.c
+++ b/src/backend/access/heap/pruneheap.c
@@ -691,15 +691,19 @@ heap_page_prune_and_freeze(Relation relation, Buffer buffer,
 	{
 		xl_heap_prune xlrec;
 		XLogRecPtr	recptr;
+		xlhp_freeze freeze;
+		xlhp_prune_items redirect,
+					dead,
+					unused;
 
+		int			nplans = 0;
 		xl_heap_freeze_plan plans[MaxHeapTuplesPerPage];
-		OffsetNumber offsets[MaxHeapTuplesPerPage];
+		OffsetNumber frz_offsets[MaxHeapTuplesPerPage];
 
-		xlrec.isCatalogRel = RelationIsAccessibleInLogicalDecoding(relation);
-		xlrec.nredirected = prstate.nredirected;
-		xlrec.ndead = prstate.ndead;
-		xlrec.nunused = prstate.nunused;
-		xlrec.nplans = 0;
+		xlrec.flags = 0;
+
+		if (RelationIsAccessibleInLogicalDecoding(relation))
+			xlrec.flags |= XLHP_IS_CATALOG_REL;
 
 		/*
 		 * The snapshotConflictHorizon for the whole record should be the most
@@ -721,8 +725,11 @@ heap_page_prune_and_freeze(Relation relation, Buffer buffer,
 		 * Destructively sorts tuples array in-place.
 		 */
 		if (do_freeze)
-			xlrec.nplans = heap_log_freeze_plan(frozen,
-												presult->nfrozen, plans, offsets);
+			nplans = heap_log_freeze_plan(frozen,
+										  presult->nfrozen, plans,
+										  frz_offsets);
+		if (nplans > 0)
+			xlrec.flags |= XLHP_HAS_FREEZE_PLANS;
 
 		XLogBeginInsert();
 		XLogRegisterData((char *) &xlrec, SizeOfHeapPrune);
@@ -734,26 +741,71 @@ heap_page_prune_and_freeze(Relation relation, Buffer buffer,
 		 * pretend that they are.  When XLogInsert stores the whole buffer,
 		 * the offset arrays need not be stored too.
 		 */
-		if (xlrec.nplans > 0)
+		if (nplans > 0)
+		{
+			freeze = (xlhp_freeze)
+			{
+				.nplans = nplans
+			};
+
+			XLogRegisterBufData(0, (char *) &freeze, offsetof(xlhp_freeze, plans));
+
 			XLogRegisterBufData(0, (char *) plans,
-								xlrec.nplans * sizeof(xl_heap_freeze_plan));
+								sizeof(xl_heap_freeze_plan) * freeze.nplans);
+		}
+
 
 		if (prstate.nredirected > 0)
+		{
+			xlrec.flags |= XLHP_HAS_REDIRECTIONS;
+
+			redirect = (xlhp_prune_items)
+			{
+				.ntargets = prstate.nredirected
+			};
+
+			XLogRegisterBufData(0, (char *) &redirect,
+								offsetof(xlhp_prune_items, data));
+
 			XLogRegisterBufData(0, (char *) prstate.redirected,
-								prstate.nredirected *
-								sizeof(OffsetNumber) * 2);
+								sizeof(OffsetNumber[2]) * prstate.nredirected);
+		}
 
 		if (prstate.ndead > 0)
+		{
+			xlrec.flags |= XLHP_HAS_DEAD_ITEMS;
+
+			dead = (xlhp_prune_items)
+			{
+				.ntargets = prstate.ndead
+			};
+
+			XLogRegisterBufData(0, (char *) &dead,
+								offsetof(xlhp_prune_items, data));
+
 			XLogRegisterBufData(0, (char *) prstate.nowdead,
-								prstate.ndead * sizeof(OffsetNumber));
+								sizeof(OffsetNumber) * dead.ntargets);
+		}
 
 		if (prstate.nunused > 0)
+		{
+			xlrec.flags |= XLHP_HAS_NOW_UNUSED_ITEMS;
+
+			unused = (xlhp_prune_items)
+			{
+				.ntargets = prstate.nunused
+			};
+
+			XLogRegisterBufData(0, (char *) &unused,
+								offsetof(xlhp_prune_items, data));
+
 			XLogRegisterBufData(0, (char *) prstate.nowunused,
-								prstate.nunused * sizeof(OffsetNumber));
+								sizeof(OffsetNumber) * unused.ntargets);
+		}
 
-		if (xlrec.nplans > 0)
-			XLogRegisterBufData(0, (char *) offsets,
-								presult->nfrozen * sizeof(OffsetNumber));
+		if (nplans > 0)
+			XLogRegisterBufData(0, (char *) frz_offsets,
+								sizeof(OffsetNumber) * presult->nfrozen);
 
 		recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_PRUNE);
 
diff --git a/src/backend/access/rmgrdesc/heapdesc.c b/src/backend/access/rmgrdesc/heapdesc.c
index 36a3d83c8c2..462b0d74f80 100644
--- a/src/backend/access/rmgrdesc/heapdesc.c
+++ b/src/backend/access/rmgrdesc/heapdesc.c
@@ -179,43 +179,109 @@ heap2_desc(StringInfo buf, XLogReaderState *record)
 	{
 		xl_heap_prune *xlrec = (xl_heap_prune *) rec;
 
-		appendStringInfo(buf, "snapshotConflictHorizon: %u, nredirected: %u, ndead: %u, isCatalogRel: %c",
+		appendStringInfo(buf, "snapshotConflictHorizon: %u, isCatalogRel: %c",
 						 xlrec->snapshotConflictHorizon,
-						 xlrec->nredirected,
-						 xlrec->ndead,
-						 xlrec->isCatalogRel ? 'T' : 'F');
+						 xlrec->flags & XLHP_IS_CATALOG_REL ? 'T' : 'F');
 
 		if (XLogRecHasBlockData(record, 0))
 		{
-			OffsetNumber *end;
-			OffsetNumber *redirected;
-			OffsetNumber *nowdead;
-			OffsetNumber *nowunused;
-			int			nredirected;
-			int			nunused;
 			Size		datalen;
-
-			redirected = (OffsetNumber *) XLogRecGetBlockData(record, 0,
-															  &datalen);
-
-			nredirected = xlrec->nredirected;
-			end = (OffsetNumber *) ((char *) redirected + datalen);
-			nowdead = redirected + (nredirected * 2);
-			nowunused = nowdead + xlrec->ndead;
-			nunused = (end - nowunused);
-			Assert(nunused >= 0);
-
-			appendStringInfo(buf, ", nunused: %d", nunused);
-
-			appendStringInfoString(buf, ", redirected:");
-			array_desc(buf, redirected, sizeof(OffsetNumber) * 2,
-					   nredirected, &redirect_elem_desc, NULL);
-			appendStringInfoString(buf, ", dead:");
-			array_desc(buf, nowdead, sizeof(OffsetNumber), xlrec->ndead,
-					   &offset_elem_desc, NULL);
-			appendStringInfoString(buf, ", unused:");
-			array_desc(buf, nowunused, sizeof(OffsetNumber), nunused,
-					   &offset_elem_desc, NULL);
+			OffsetNumber *redirected = NULL;
+			OffsetNumber *nowdead = NULL;
+			OffsetNumber *nowunused = NULL;
+			int			nredirected = 0;
+			int			nunused = 0;
+			int			ndead = 0;
+			int			nplans = 0;
+			xl_heap_freeze_plan *plans = NULL;
+			OffsetNumber *frz_offsets;
+
+			char	   *cursor = XLogRecGetBlockData(record, 0, &datalen);
+
+			if (xlrec->flags & XLHP_HAS_FREEZE_PLANS)
+			{
+				xlhp_freeze *freeze = (xlhp_freeze *) cursor;
+
+				nplans = freeze->nplans;
+				Assert(nplans > 0);
+				plans = freeze->plans;
+
+				cursor += offsetof(xlhp_freeze, plans);
+				cursor += sizeof(xl_heap_freeze_plan) * freeze->nplans;
+			}
+
+			if (xlrec->flags & XLHP_HAS_REDIRECTIONS)
+			{
+				xlhp_prune_items *subrecord = (xlhp_prune_items *) cursor;
+
+				nredirected = subrecord->ntargets;
+				Assert(nredirected > 0);
+				redirected = &subrecord->data[0];
+
+				cursor += offsetof(xlhp_prune_items, data);
+				cursor += sizeof(OffsetNumber[2]) * nredirected;
+			}
+
+			if (xlrec->flags & XLHP_HAS_DEAD_ITEMS)
+			{
+				xlhp_prune_items *subrecord = (xlhp_prune_items *) cursor;
+
+				ndead = subrecord->ntargets;
+				Assert(ndead > 0);
+				nowdead = subrecord->data;
+
+				cursor += offsetof(xlhp_prune_items, data);
+				cursor += sizeof(OffsetNumber) * ndead;
+			}
+
+			if (xlrec->flags & XLHP_HAS_NOW_UNUSED_ITEMS)
+			{
+				xlhp_prune_items *subrecord = (xlhp_prune_items *) cursor;
+
+				nunused = subrecord->ntargets;
+				Assert(nunused > 0);
+				nowunused = subrecord->data;
+
+				cursor += offsetof(xlhp_prune_items, data);
+				cursor += sizeof(OffsetNumber) * nunused;
+			}
+
+			if (nplans > 0)
+				frz_offsets = (OffsetNumber *) cursor;
+
+			appendStringInfo(buf, ", nredirected: %u, ndead: %u, nunused: %u, nplans: %u",
+							 nredirected,
+							 ndead,
+							 nunused,
+							 nplans);
+
+			if (nredirected > 0)
+			{
+				appendStringInfoString(buf, ", redirected:");
+				array_desc(buf, redirected, sizeof(OffsetNumber) * 2,
+						   nredirected, &redirect_elem_desc, NULL);
+			}
+
+			if (ndead > 0)
+			{
+				appendStringInfoString(buf, ", dead:");
+				array_desc(buf, nowdead, sizeof(OffsetNumber), ndead,
+						   &offset_elem_desc, NULL);
+			}
+
+			if (nunused > 0)
+			{
+				appendStringInfoString(buf, ", unused:");
+				array_desc(buf, nowunused, sizeof(OffsetNumber), nunused,
+						   &offset_elem_desc, NULL);
+			}
+
+			if (nplans > 0)
+			{
+				appendStringInfoString(buf, ", plans:");
+				array_desc(buf, plans, sizeof(xl_heap_freeze_plan), nplans,
+						   &plan_elem_desc, &frz_offsets);
+			}
 		}
 	}
 	else if (info == XLOG_HEAP2_VACUUM)
diff --git a/src/include/access/heapam_xlog.h b/src/include/access/heapam_xlog.h
index 22f236bb52a..bebd93422d5 100644
--- a/src/include/access/heapam_xlog.h
+++ b/src/include/access/heapam_xlog.h
@@ -227,42 +227,84 @@ typedef struct xl_heap_update
 #define SizeOfHeapUpdate	(offsetof(xl_heap_update, new_offnum) + sizeof(OffsetNumber))
 
 /*
- * This is what we need to know about page pruning (both during VACUUM and
- * during opportunistic pruning)
+ * XXX: As of Postgres 17, XLOG_HEAP2_PRUNE records replace
+ * XLOG_HEAP2_FREEZE_PAGE records.
+ */
+
+/*
+ * This is what we need to know about page pruning and freezing, both during
+ * VACUUM and during opportunistic pruning.
  *
- * The array of OffsetNumbers following the fixed part of the record contains:
- *	* for each freeze plan: the freeze plan
- *	* for each redirected item: the item offset, then the offset redirected to
- *	* for each now-dead item: the item offset
- *	* for each now-unused item: the item offset
- *	* for each tuple frozen by the freeze plans: the offset of the item corresponding to that tuple
- * The total number of OffsetNumbers is therefore
- * (2*nredirected) + ndead + nunused + (sum[plan.ntuples for plan in plans])
+ * If XLHP_HAS_REDIRECTIONS, XLHP_HAS_DEAD_ITEMS, or XLHP_HAS_NOW_UNUSED_ITEMS
+ * is set, replay acquires a full cleanup lock; otherwise an ordinary
+ * exclusive lock suffices, e.g. when freezing was the only modification.
  *
- * Acquires a full cleanup lock.
+ * The data for block reference 0 contains "sub-records", one for each of the
+ * XLHP_HAS_* flags that is set.  See the xlhp_* struct definitions below.
+ *
+ * The sub-records appear in the same order as the XLHP_* flags.
  */
 typedef struct xl_heap_prune
 {
 	TransactionId snapshotConflictHorizon;
-	uint16		nplans;
-	uint16		nredirected;
-	uint16		ndead;
-	uint16		nunused;
-	bool		isCatalogRel;	/* to handle recovery conflict during logical
-								 * decoding on standby */
-	/*
-	 * OFFSET NUMBERS and freeze plans are in the block reference 0 in the
-	 * following order:
-	 *
-	 *		* xl_heap_freeze_plan plans[nplans];
-	 * 		* OffsetNumber redirected[2 * nredirected];
-	 * 		* OffsetNumber nowdead[ndead];
-	 *		* OffsetNumber nowunused[nunused];
-	 * 		* OffsetNumber frz_offsets[...];
-	 */
+	uint8		flags;
 } xl_heap_prune;
 
-#define SizeOfHeapPrune (offsetof(xl_heap_prune, isCatalogRel) + sizeof(bool))
+#define		XLHP_IS_CATALOG_REL			0x01	/* to handle recovery conflict
+												 * during logical decoding on
+												 * standby */
+#define		XLHP_HAS_FREEZE_PLANS		0x02
+#define		XLHP_HAS_REDIRECTIONS		0x04
+#define		XLHP_HAS_DEAD_ITEMS	        0x08
+#define		XLHP_HAS_NOW_UNUSED_ITEMS   0x10
+
+#define SizeOfHeapPrune (offsetof(xl_heap_prune, flags) + sizeof(uint8))
+
+/*
+ * This struct represents a 'freeze plan', which describes how to freeze a
+ * group of one or more heap tuples (appears in xl_heap_freeze_page and
+ * xl_heap_prune's xlhp_freeze records)
+ */
+/* 0x01 was XLH_FREEZE_XMIN */
+#define		XLH_FREEZE_XVAC		0x02
+#define		XLH_INVALID_XVAC	0x04
+
+typedef struct xl_heap_freeze_plan
+{
+	TransactionId xmax;
+	uint16		t_infomask2;
+	uint16		t_infomask;
+	uint8		frzflags;
+
+	/* Length of individual page offset numbers array for this plan */
+	uint16		ntuples;
+} xl_heap_freeze_plan;
+
+/*
+ * Sub-record type contained in block reference 0 of a prune record if
+ * XLHP_HAS_FREEZE_PLANS is set.  It holds the freeze plans for the page.
+ *
+ * The page offset number array for each freeze plan (the tuples that plan
+ * freezes) is stored at the very end of block 0's data, after the other
+ * xlhp_* sub-records; the REDO routine freezes those heap tuples.
+ */
+typedef struct xlhp_freeze
+{
+	uint16		nplans;
+	xl_heap_freeze_plan plans[FLEXIBLE_ARRAY_MEMBER];
+} xlhp_freeze;
+
+/*
+ * Sub-record type contained in block reference 0 of a prune record if
+ * XLHP_HAS_REDIRECTIONS/XLHP_HAS_DEAD_ITEMS/XLHP_HAS_NOW_UNUSED_ITEMS is set.
+ * Note that in the XLHP_HAS_REDIRECTIONS variant, the data array actually
+ * contains 2 * ntargets OffsetNumbers.
+ */
+typedef struct xlhp_prune_items
+{
+	uint16		ntargets;
+	OffsetNumber data[FLEXIBLE_ARRAY_MEMBER];
+} xlhp_prune_items;
 
 /*
  * The vacuum page record is similar to the prune record, but can only mark
@@ -326,26 +368,6 @@ typedef struct xl_heap_inplace
 } xl_heap_inplace;
 
 #define SizeOfHeapInplace	(offsetof(xl_heap_inplace, offnum) + sizeof(OffsetNumber))
-
-/*
- * This struct represents a 'freeze plan', which describes how to freeze a
- * group of one or more heap tuples (appears in xl_heap_freeze_page record)
- */
-/* 0x01 was XLH_FREEZE_XMIN */
-#define		XLH_FREEZE_XVAC		0x02
-#define		XLH_INVALID_XVAC	0x04
-
-typedef struct xl_heap_freeze_plan
-{
-	TransactionId xmax;
-	uint16		t_infomask2;
-	uint16		t_infomask;
-	uint8		frzflags;
-
-	/* Length of individual page offset numbers array for this plan */
-	uint16		ntuples;
-} xl_heap_freeze_plan;
-
 /*
  * This is what we need to know about a block being frozen during vacuum
  *
@@ -353,6 +375,10 @@ typedef struct xl_heap_freeze_plan
  * (with nplans elements), followed by one or more page offset number arrays.
  * Each such page offset number array corresponds to a single freeze plan
  * (REDO routine freezes corresponding heap tuples using freeze plan).
+ *
+ * This is kept for backwards compatibility, for reading individual freeze
+ * records.  As of Postgres 17, xl_heap_freeze_plan entries occur in
+ * xl_heap_prune records.
  */
 typedef struct xl_heap_freeze_page
 {
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 1c1a4d305d6..2702f211d90 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -4002,6 +4002,8 @@ xl_xact_stats_items
 xl_xact_subxacts
 xl_xact_twophase
 xl_xact_xinfo
+xlhp_freeze
+xlhp_prune_items
 xmlBuffer
 xmlBufferPtr
 xmlChar
-- 
2.40.1

