From d6c08a8426907292a352d85b8fecc12d3adc8cbc Mon Sep 17 00:00:00 2001
From: Tomas Vondra <tomas.vondra@postgresql.org>
Date: Tue, 28 Nov 2023 18:48:21 +0100
Subject: [PATCH v20231128 4/4] pgindent

---
 src/backend/access/brin/brin.c             | 96 +++++++++++-----------
 src/backend/utils/sort/tuplesortvariants.c | 16 ++--
 src/tools/pgindent/typedefs.list           |  5 ++
 3 files changed, 62 insertions(+), 55 deletions(-)

diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index 8d96d2ac9be..001cf04aac5 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -67,14 +67,14 @@ typedef struct BrinSpool
 typedef struct BrinShared
 {
 	/*
-	 * These fields are not modified during the build.  They primarily exist for
-	 * the benefit of worker processes that need to create state corresponding
-	 * to that used by the leader.
+	 * These fields are not modified during the build.  They primarily exist
+	 * for the benefit of worker processes that need to create state
+	 * corresponding to that used by the leader.
 	 */
 	Oid			heaprelid;
 	Oid			indexrelid;
 	bool		isconcurrent;
-	BlockNumber	pagesPerRange;
+	BlockNumber pagesPerRange;
 	int			scantuplesortstates;
 
 	/*
@@ -145,9 +145,10 @@ typedef struct BrinLeader
 	 *
 	 * brinshared is the shared state for entire build.  sharedsort is the
 	 * shared, tuplesort-managed state passed to each process tuplesort.
-	 * snapshot is the snapshot used by the scan iff an MVCC snapshot is required.
+	 * snapshot is the snapshot used by the scan iff an MVCC snapshot is
+	 * required.
 	 */
-	BrinShared	   *brinshared;
+	BrinShared *brinshared;
 	Sharedsort *sharedsort;
 	Snapshot	snapshot;
 	WalUsage   *walusage;
@@ -1035,8 +1036,8 @@ brinbuildCallbackParallel(Relation index,
 	/*
 	 * If we're in a block that belongs to a future range, summarize what
 	 * we've got and start afresh.  Note the scan might have skipped many
-	 * pages, if they were devoid of live tuples; we do not create emptry
-	 * BRIN ranges here - the leader is responsible for filling them in.
+	 * pages, if they were devoid of live tuples; we do not create empty BRIN
+	 * ranges here - the leader is responsible for filling them in.
 	 */
 	if (thisblock > state->bs_currRangeStart + state->bs_pagesPerRange - 1)
 	{
@@ -1169,23 +1170,24 @@ brinbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 		/*
 		 * Begin serial/leader tuplesort.
 		 *
-		 * In cases where parallelism is involved, the leader receives the same
-		 * share of maintenance_work_mem as a serial sort (it is generally treated
-		 * in the same way as a serial sort once we return).  Parallel worker
-		 * Tuplesortstates will have received only a fraction of
-		 * maintenance_work_mem, though.
+		 * In cases where parallelism is involved, the leader receives the
+		 * same share of maintenance_work_mem as a serial sort (it is
+		 * generally treated in the same way as a serial sort once we return).
+		 * Parallel worker Tuplesortstates will have received only a fraction
+		 * of maintenance_work_mem, though.
 		 *
 		 * We rely on the lifetime of the Leader Tuplesortstate almost not
-		 * overlapping with any worker Tuplesortstate's lifetime.  There may be
-		 * some small overlap, but that's okay because we rely on leader
-		 * Tuplesortstate only allocating a small, fixed amount of memory here.
-		 * When its tuplesort_performsort() is called (by our caller), and
-		 * significant amounts of memory are likely to be used, all workers must
-		 * have already freed almost all memory held by their Tuplesortstates
-		 * (they are about to go away completely, too).  The overall effect is
-		 * that maintenance_work_mem always represents an absolute high watermark
-		 * on the amount of memory used by a CREATE INDEX operation, regardless of
-		 * the use of parallelism or any other factor.
+		 * overlapping with any worker Tuplesortstate's lifetime.  There may
+		 * be some small overlap, but that's okay because we rely on leader
+		 * Tuplesortstate only allocating a small, fixed amount of memory
+		 * here. When its tuplesort_performsort() is called (by our caller),
+		 * and significant amounts of memory are likely to be used, all
+		 * workers must have already freed almost all memory held by their
+		 * Tuplesortstates (they are about to go away completely, too).  The
+		 * overall effect is that maintenance_work_mem always represents an
+		 * absolute high watermark on the amount of memory used by a CREATE
+		 * INDEX operation, regardless of the use of parallelism or any other
+		 * factor.
 		 */
 		state->bs_spool->sortstate =
 			tuplesort_begin_index_brin(heap, index,
@@ -1198,7 +1200,7 @@ brinbuild(Relation heap, Relation index, IndexInfo *indexInfo)
 		 */
 		_brin_end_parallel(state->bs_leader, state);
 	}
-	else	/* no parallel index build, just do the usual thing */
+	else						/* no parallel index build */
 	{
 		reltuples = table_index_build_scan(heap, index, indexInfo, false, true,
 										   brinbuildCallback, (void *) state, NULL);
@@ -2288,9 +2290,9 @@ _brin_begin_parallel(BrinBuildState *buildstate, Relation heap, Relation index,
 	Snapshot	snapshot;
 	Size		estbrinshared;
 	Size		estsort;
-	BrinShared   *brinshared;
-	Sharedsort   *sharedsort;
-	BrinLeader   *brinleader = (BrinLeader *) palloc0(sizeof(BrinLeader));
+	BrinShared *brinshared;
+	Sharedsort *sharedsort;
+	BrinLeader *brinleader = (BrinLeader *) palloc0(sizeof(BrinLeader));
 	WalUsage   *walusage;
 	BufferUsage *bufferusage;
 	bool		leaderparticipates = true;
@@ -2471,12 +2473,12 @@ _brin_end_parallel(BrinLeader *brinleader, BrinBuildState *state)
 	BrinMemTuple *memtuple = NULL;
 	Size		tuplen;
 	BrinShared *brinshared = brinleader->brinshared;
-	BlockNumber	prevblkno = InvalidBlockNumber;
+	BlockNumber prevblkno = InvalidBlockNumber;
 	BrinTuple  *emptyTuple = NULL;
 	Size		emptySize;
 	BrinSpool  *spool;
-	MemoryContext	rangeCxt,
-					oldCxt;
+	MemoryContext rangeCxt,
+				oldCxt;
 
 	/* Shutdown worker processes */
 	WaitForParallelWorkersToFinish(brinleader->pcxt);
@@ -2493,18 +2495,18 @@ _brin_end_parallel(BrinLeader *brinleader, BrinBuildState *state)
 	tuplesort_performsort(spool->sortstate);
 
 	/*
-	 * Initialize BrinMemTuple we'll use to union summaries from workers
-	 * (in case they happened to produce parts of the same paga range).
+	 * Initialize BrinMemTuple we'll use to union summaries from workers (in
+	 * case they happened to produce parts of the same page range).
 	 */
 	memtuple = brin_new_memtuple(state->bs_bdesc);
 
 	/*
 	 * Create a memory context we'll reset to combine results for a single
-	 * page range (received from the workers). We don't expect huge number
-	 * of overlaps under regular circumstances, because for large tables
-	 * the chunk size is likely larger than the BRIN page range), but it
-	 * can happen, and the union functions may do all kinds of stuff. So
-	 * we better reset the context once in a while.
+	 * page range (received from the workers). We don't expect a huge number
+	 * of overlaps under regular circumstances, because for large tables the
+	 * chunk size is likely larger than the BRIN page range, but it can
+	 * happen, and the union functions may do all kinds of stuff. So we better
+	 * reset the context once in a while.
 	 */
 	rangeCxt = AllocSetContextCreate(CurrentMemoryContext,
 									 "brin union",
@@ -2513,8 +2515,8 @@ _brin_end_parallel(BrinLeader *brinleader, BrinBuildState *state)
 
 	/*
 	 * Read the BRIN tuples from the shared tuplesort, sorted by block number.
-	 * That probably gives us an index that is cheaper to scan, thanks to mostly
-	 * getting data from the same index page as before.
+	 * That probably gives us an index that is cheaper to scan, thanks to
+	 * mostly getting data from the same index page as before.
 	 */
 	while ((btup = tuplesort_getbrintuple(spool->sortstate, &tuplen, true)) != NULL)
 	{
@@ -2528,15 +2530,15 @@ _brin_end_parallel(BrinLeader *brinleader, BrinBuildState *state)
 		 * the memtuple, and continue with the next one from tuplesort. We
 		 * however may need to insert empty summaries into the index.
 		 *
-		 * If it's the same block as the last we saw, we simply union the
-		 * brin tuple into it, and we're done - we don't even need to insert
-		 * empty ranges, because that was done earlier when we saw the first
-		 * brin tuple (for this range).
+		 * If it's the same block as the last we saw, we simply union the brin
+		 * tuple into it, and we're done - we don't even need to insert empty
+		 * ranges, because that was done earlier when we saw the first brin
+		 * tuple (for this range).
 		 *
 		 * Finally, if it's not the first brin tuple, and it's not the same
-		 * page range, we need to do the insert and then deform the tuple
-		 * into the memtuple. Then we'll insert empty ranges before the
-		 * new brin tuple, if needed.
+		 * page range, we need to do the insert and then deform the tuple into
+		 * the memtuple. Then we'll insert empty ranges before the new brin
+		 * tuple, if needed.
 		 */
 		if (prevblkno == InvalidBlockNumber)
 		{
@@ -2730,7 +2732,7 @@ _brin_parallel_scan_and_build(BrinBuildState *state, BrinSpool *brinspool,
 							  bool progress)
 {
 	SortCoordinate coordinate;
-	TableScanDesc	scan;
+	TableScanDesc scan;
 	double		reltuples;
 	IndexInfo  *indexInfo;
 
diff --git a/src/backend/utils/sort/tuplesortvariants.c b/src/backend/utils/sort/tuplesortvariants.c
index 9b3a70e6ccf..90fc605f1ca 100644
--- a/src/backend/utils/sort/tuplesortvariants.c
+++ b/src/backend/utils/sort/tuplesortvariants.c
@@ -965,8 +965,8 @@ tuplesort_getbrintuple(Tuplesortstate *state, Size *len, bool forward)
 {
 	TuplesortPublic *base = TuplesortstateGetPublic(state);
 	MemoryContext oldcontext = MemoryContextSwitchTo(base->sortcontext);
-	SortTuple		stup;
-	BrinSortTuple  *btup;
+	SortTuple	stup;
+	BrinSortTuple *btup;
 
 	if (!tuplesort_gettuple_common(state, forward, &stup))
 		stup.tuple = NULL;
@@ -1708,7 +1708,7 @@ removeabbrev_index_brin(Tuplesortstate *state, SortTuple *stups, int count)
 
 	for (i = 0; i < count; i++)
 	{
-		BrinSortTuple   *tuple;
+		BrinSortTuple *tuple;
 
 		tuple = stups[i].tuple;
 		stups[i].datum1 = tuple->tuple.bt_blkno;
@@ -1735,8 +1735,8 @@ static void
 writetup_index_brin(Tuplesortstate *state, LogicalTape *tape, SortTuple *stup)
 {
 	TuplesortPublic *base = TuplesortstateGetPublic(state);
-	BrinSortTuple  *tuple = (BrinSortTuple *) stup->tuple;
-	unsigned int	tuplen = tuple->tuplen;
+	BrinSortTuple *tuple = (BrinSortTuple *) stup->tuple;
+	unsigned int tuplen = tuple->tuplen;
 
 	tuplen = tuplen + sizeof(tuplen);
 	LogicalTapeWrite(tape, &tuplen, sizeof(tuplen));
@@ -1749,6 +1749,7 @@ static void
 readtup_index_brin(Tuplesortstate *state, SortTuple *stup,
 				   LogicalTape *tape, unsigned int len)
 {
+	BrinSortTuple *tuple;
 	TuplesortPublic *base = TuplesortstateGetPublic(state);
 	unsigned int tuplen = len - sizeof(unsigned int);
 
@@ -1756,9 +1757,8 @@ readtup_index_brin(Tuplesortstate *state, SortTuple *stup,
 	 * Allocate space for the BRIN sort tuple, which is BrinTuple with an
 	 * extra length field.
 	 */
-	BrinSortTuple *tuple
-		= (BrinSortTuple *) tuplesort_readtup_alloc(state,
-													BRINSORTTUPLE_SIZE(tuplen));
+	tuple = (BrinSortTuple *) tuplesort_readtup_alloc(state,
+													  BRINSORTTUPLE_SIZE(tuplen));
 
 	tuple->tuplen = tuplen;
 
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 86a9886d4f7..001fef58652 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -297,13 +297,17 @@ BpChar
 BrinBuildState
 BrinDesc
 BrinInsertState
+BrinLeader
 BrinMemTuple
 BrinMetaPageData
 BrinOpaque
 BrinOpcInfo
 BrinOptions
 BrinRevmap
+BrinShared
+BrinSortTuple
 BrinSpecialSpace
+BrinSpool
 BrinStatsData
 BrinTuple
 BrinValues
@@ -2879,6 +2883,7 @@ TupleTableSlotOps
 TuplesortClusterArg
 TuplesortDatumArg
 TuplesortIndexArg
+TuplesortIndexBrinArg
 TuplesortIndexBTreeArg
 TuplesortIndexHashArg
 TuplesortInstrumentation
-- 
2.42.0

