From 8ee9ffc1325118438309ee25e9b33c61cccd022f Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryzbyj@telsasoft.com>
Date: Sun, 26 Jan 2020 22:38:10 -0600
Subject: [PATCH v14 1/3] make vacrelstats a global

Rather than pass an LVRelStats pointer through nearly every routine in
vacuumlazy.c, keep the per-relation statistics in a single file-level
variable.  This simplifies the function signatures in preparation for
the rest of the series.
---
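Note for reviewers: the transformation is mechanical and the same at every
call site; a minimal sketch of the pattern, with signatures abbreviated
from the patch below:

    /* before: the stats struct is threaded through each helper */
    static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);

    /* after: helpers read the file-level variable instead */
    static LVRelStats vacrelstats;
    static void lazy_truncate_heap(Relation onerel);

heap_vacuum_rel() zeroes the struct with memset() on entry, so no state
leaks between relations vacuumed in the same command.
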
 src/backend/access/heap/vacuumlazy.c | 276 +++++++++++++++++------------------
 1 file changed, 136 insertions(+), 140 deletions(-)

diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 8ce5011..114428b 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -302,16 +302,17 @@ static MultiXactId MultiXactCutoff;
 
 static BufferAccessStrategy vac_strategy;
 
+static LVRelStats vacrelstats;
 
 /* non-export function prototypes */
 static void lazy_scan_heap(Relation onerel, VacuumParams *params,
-						   LVRelStats *vacrelstats, Relation *Irel, int nindexes,
+						   Relation *Irel, int nindexes,
 						   bool aggressive);
-static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
+static void lazy_vacuum_heap(Relation onerel);
 static bool lazy_check_needs_freeze(Buffer buf, bool *hastup);
 static void lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
 									IndexBulkDeleteResult **stats,
-									LVRelStats *vacrelstats, LVParallelState *lps,
+									LVParallelState *lps,
 									int nindexes);
 static void lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
 							  LVDeadTuples *dead_tuples, double reltuples);
@@ -319,13 +320,11 @@ static void lazy_cleanup_index(Relation indrel,
 							   IndexBulkDeleteResult **stats,
 							   double reltuples, bool estimated_count);
 static int	lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
-							 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
-static bool should_attempt_truncation(VacuumParams *params,
-									  LVRelStats *vacrelstats);
-static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
-static BlockNumber count_nondeletable_pages(Relation onerel,
-											LVRelStats *vacrelstats);
-static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
+							 int tupindex, Buffer *vmbuffer);
+static bool should_attempt_truncation(VacuumParams *params);
+static void lazy_truncate_heap(Relation onerel);
+static BlockNumber count_nondeletable_pages(Relation onerel);
+static void lazy_space_alloc(BlockNumber relblocks);
 static void lazy_record_dead_tuple(LVDeadTuples *dead_tuples,
 								   ItemPointer itemptr);
 static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
@@ -333,19 +332,19 @@ static int	vac_cmp_itemptr(const void *left, const void *right);
 static bool heap_page_is_all_visible(Relation rel, Buffer buf,
 									 TransactionId *visibility_cutoff_xid, bool *all_frozen);
 static void lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
-										 LVRelStats *vacrelstats, LVParallelState *lps,
+										 LVParallelState *lps,
 										 int nindexes);
 static void parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
 								  LVShared *lvshared, LVDeadTuples *dead_tuples,
 								  int nindexes);
 static void vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
-								  LVRelStats *vacrelstats, LVParallelState *lps,
+								  LVParallelState *lps,
 								  int nindexes);
 static void vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
 							 LVShared *lvshared, LVSharedIndStats *shared_indstats,
 							 LVDeadTuples *dead_tuples);
 static void lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
-									 LVRelStats *vacrelstats, LVParallelState *lps,
+									 LVParallelState *lps,
 									 int nindexes);
 static long compute_max_dead_tuples(BlockNumber relblocks, bool hasindex);
 static int	compute_parallel_vacuum_workers(Relation *Irel, int nindexes, int nrequested,
@@ -355,7 +354,7 @@ static void prepare_index_statistics(LVShared *lvshared, bool *can_parallel_vacu
 static void update_index_statistics(Relation *Irel, IndexBulkDeleteResult **stats,
 									int nindexes);
 static LVParallelState *begin_parallel_vacuum(Oid relid, Relation *Irel,
-											  LVRelStats *vacrelstats, BlockNumber nblocks,
+											  BlockNumber nblocks,
 											  int nindexes, int nrequested);
 static void end_parallel_vacuum(Relation *Irel, IndexBulkDeleteResult **stats,
 								LVParallelState *lps, int nindexes);
@@ -376,7 +375,6 @@ void
 heap_vacuum_rel(Relation onerel, VacuumParams *params,
 				BufferAccessStrategy bstrategy)
 {
-	LVRelStats *vacrelstats;
 	Relation   *Irel;
 	int			nindexes;
 	PGRUsage	ru0;
@@ -458,21 +456,20 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 		return;
 	}
 
-	vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
-
-	vacrelstats->old_rel_pages = onerel->rd_rel->relpages;
-	vacrelstats->old_live_tuples = onerel->rd_rel->reltuples;
-	vacrelstats->num_index_scans = 0;
-	vacrelstats->pages_removed = 0;
-	vacrelstats->lock_waiter_detected = false;
+	memset(&vacrelstats, 0, sizeof(vacrelstats));
+	vacrelstats.old_rel_pages = onerel->rd_rel->relpages;
+	vacrelstats.old_live_tuples = onerel->rd_rel->reltuples;
+	/*
+	 * All other fields were already zeroed by the memset above.
+	 */
 
 	/* Open all indexes of the relation */
 	vac_open_indexes(onerel, RowExclusiveLock, &nindexes, &Irel);
-	vacrelstats->useindex = (nindexes > 0 &&
+	vacrelstats.useindex = (nindexes > 0 &&
 							 params->index_cleanup == VACOPT_TERNARY_ENABLED);
 
 	/* Do the vacuuming */
-	lazy_scan_heap(onerel, params, vacrelstats, Irel, nindexes, aggressive);
+	lazy_scan_heap(onerel, params, Irel, nindexes, aggressive);
 
 	/* Done with indexes */
 	vac_close_indexes(nindexes, Irel, NoLock);
@@ -484,8 +481,8 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 	 * NB: We need to check this before truncating the relation, because that
 	 * will change ->rel_pages.
 	 */
-	if ((vacrelstats->scanned_pages + vacrelstats->frozenskipped_pages)
-		< vacrelstats->rel_pages)
+	if ((vacrelstats.scanned_pages + vacrelstats.frozenskipped_pages)
+		< vacrelstats.rel_pages)
 	{
 		Assert(!aggressive);
 		scanned_all_unfrozen = false;
@@ -496,8 +493,8 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 	/*
 	 * Optionally truncate the relation.
 	 */
-	if (should_attempt_truncation(params, vacrelstats))
-		lazy_truncate_heap(onerel, vacrelstats);
+	if (should_attempt_truncation(params))
+		lazy_truncate_heap(onerel);
 
 	/* Report that we are now doing final cleanup */
 	pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
@@ -524,12 +521,12 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 	 * Also, don't change relfrozenxid/relminmxid if we skipped any pages,
 	 * since then we don't know for certain that all tuples have a newer xmin.
 	 */
-	new_rel_pages = vacrelstats->rel_pages;
-	new_live_tuples = vacrelstats->new_live_tuples;
-	if (vacrelstats->tupcount_pages == 0 && new_rel_pages > 0)
+	new_rel_pages = vacrelstats.rel_pages;
+	new_live_tuples = vacrelstats.new_live_tuples;
+	if (vacrelstats.tupcount_pages == 0 && new_rel_pages > 0)
 	{
-		new_rel_pages = vacrelstats->old_rel_pages;
-		new_live_tuples = vacrelstats->old_live_tuples;
+		new_rel_pages = vacrelstats.old_rel_pages;
+		new_live_tuples = vacrelstats.old_live_tuples;
 	}
 
 	visibilitymap_count(onerel, &new_rel_allvisible, NULL);
@@ -552,7 +549,7 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 	pgstat_report_vacuum(RelationGetRelid(onerel),
 						 onerel->rd_rel->relisshared,
 						 new_live_tuples,
-						 vacrelstats->new_dead_tuples);
+						 vacrelstats.new_dead_tuples);
 	pgstat_progress_end_command();
 
 	/* and log the action if appropriate */
@@ -601,17 +598,17 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
 							 get_database_name(MyDatabaseId),
 							 get_namespace_name(RelationGetNamespace(onerel)),
 							 RelationGetRelationName(onerel),
-							 vacrelstats->num_index_scans);
+							 vacrelstats.num_index_scans);
 			appendStringInfo(&buf, _("pages: %u removed, %u remain, %u skipped due to pins, %u skipped frozen\n"),
-							 vacrelstats->pages_removed,
-							 vacrelstats->rel_pages,
-							 vacrelstats->pinskipped_pages,
-							 vacrelstats->frozenskipped_pages);
+							 vacrelstats.pages_removed,
+							 vacrelstats.rel_pages,
+							 vacrelstats.pinskipped_pages,
+							 vacrelstats.frozenskipped_pages);
 			appendStringInfo(&buf,
 							 _("tuples: %.0f removed, %.0f remain, %.0f are dead but not yet removable, oldest xmin: %u\n"),
-							 vacrelstats->tuples_deleted,
-							 vacrelstats->new_rel_tuples,
-							 vacrelstats->new_dead_tuples,
+							 vacrelstats.tuples_deleted,
+							 vacrelstats.new_rel_tuples,
+							 vacrelstats.new_dead_tuples,
 							 OldestXmin);
 			appendStringInfo(&buf,
 							 _("buffer usage: %d hits, %d misses, %d dirtied\n"),
@@ -646,7 +643,7 @@ heap_vacuum_rel(Relation onerel, VacuumParams *params,
  * which would be after the rows have become inaccessible.
  */
 static void
-vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
+vacuum_log_cleanup_info(Relation rel)
 {
 	/*
 	 * Skip this for relations for which no WAL is to be written, or if we're
@@ -658,8 +655,8 @@ vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
 	/*
 	 * No need to write the record at all unless it contains a valid value
 	 */
-	if (TransactionIdIsValid(vacrelstats->latestRemovedXid))
-		(void) log_heap_cleanup_info(rel->rd_node, vacrelstats->latestRemovedXid);
+	if (TransactionIdIsValid(vacrelstats.latestRemovedXid))
+		(void) log_heap_cleanup_info(rel->rd_node, vacrelstats.latestRemovedXid);
 }
 
 /*
@@ -691,7 +688,7 @@ vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
  *		reference them have been killed.
  */
 static void
-lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
+lazy_scan_heap(Relation onerel, VacuumParams *params,
 			   Relation *Irel, int nindexes, bool aggressive)
 {
 	LVParallelState *lps = NULL;
@@ -747,18 +744,18 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		palloc0(nindexes * sizeof(IndexBulkDeleteResult *));
 
 	nblocks = RelationGetNumberOfBlocks(onerel);
-	vacrelstats->rel_pages = nblocks;
-	vacrelstats->scanned_pages = 0;
-	vacrelstats->tupcount_pages = 0;
-	vacrelstats->nonempty_pages = 0;
-	vacrelstats->latestRemovedXid = InvalidTransactionId;
+	vacrelstats.rel_pages = nblocks;
+	vacrelstats.scanned_pages = 0;
+	vacrelstats.tupcount_pages = 0;
+	vacrelstats.nonempty_pages = 0;
+	vacrelstats.latestRemovedXid = InvalidTransactionId;
 
 	/*
 	 * Initialize the state for a parallel vacuum.  As of now, only one worker
 	 * can be used for an index, so we invoke parallelism only if there are at
 	 * least two indexes on a table.
 	 */
-	if (params->nworkers >= 0 && vacrelstats->useindex && nindexes > 1)
+	if (params->nworkers >= 0 && vacrelstats.useindex && nindexes > 1)
 	{
 		/*
 		 * Since parallel workers cannot access data in temporary tables, we
@@ -777,7 +774,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		}
 		else
 			lps = begin_parallel_vacuum(RelationGetRelid(onerel), Irel,
-										vacrelstats, nblocks, nindexes,
+										nblocks, nindexes,
 										params->nworkers);
 	}
 
@@ -786,9 +783,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	 * initialized.
 	 */
 	if (!ParallelVacuumIsActive(lps))
-		lazy_space_alloc(vacrelstats, nblocks);
+		lazy_space_alloc(nblocks);
 
-	dead_tuples = vacrelstats->dead_tuples;
+	dead_tuples = vacrelstats.dead_tuples;
 	frozen = palloc(sizeof(xl_heap_freeze_tuple) * MaxHeapTuplesPerPage);
 
 	/* Report that we're scanning the heap, advertising total # of blocks */
@@ -889,7 +886,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 
 		/* see note above about forcing scanning of last page */
 #define FORCE_CHECK_PAGE() \
-		(blkno == nblocks - 1 && should_attempt_truncation(params, vacrelstats))
+		(blkno == nblocks - 1 && should_attempt_truncation(params))
 
 		pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_SCANNED, blkno);
 
@@ -960,7 +957,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 				 * in this case an approximate answer is OK.
 				 */
 				if (aggressive || VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
-					vacrelstats->frozenskipped_pages++;
+					vacrelstats.frozenskipped_pages++;
 				continue;
 			}
 			all_visible_according_to_vm = true;
@@ -989,10 +986,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 
 			/* Work on all the indexes, then the heap */
 			lazy_vacuum_all_indexes(onerel, Irel, indstats,
-									vacrelstats, lps, nindexes);
-
+									lps, nindexes);
 			/* Remove tuples from heap */
-			lazy_vacuum_heap(onerel, vacrelstats);
+			lazy_vacuum_heap(onerel);
 
 			/*
 			 * Forget the now-vacuumed tuples, and press on, but be careful
@@ -1039,7 +1035,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			if (!aggressive && !FORCE_CHECK_PAGE())
 			{
 				ReleaseBuffer(buf);
-				vacrelstats->pinskipped_pages++;
+				vacrelstats.pinskipped_pages++;
 				continue;
 			}
 
@@ -1063,10 +1059,10 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			if (!lazy_check_needs_freeze(buf, &hastup))
 			{
 				UnlockReleaseBuffer(buf);
-				vacrelstats->scanned_pages++;
-				vacrelstats->pinskipped_pages++;
+				vacrelstats.scanned_pages++;
+				vacrelstats.pinskipped_pages++;
 				if (hastup)
-					vacrelstats->nonempty_pages = blkno + 1;
+					vacrelstats.nonempty_pages = blkno + 1;
 				continue;
 			}
 			if (!aggressive)
@@ -1076,9 +1072,9 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 				 * to claiming that the page contains no freezable tuples.
 				 */
 				UnlockReleaseBuffer(buf);
-				vacrelstats->pinskipped_pages++;
+				vacrelstats.pinskipped_pages++;
 				if (hastup)
-					vacrelstats->nonempty_pages = blkno + 1;
+					vacrelstats.nonempty_pages = blkno + 1;
 				continue;
 			}
 			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
@@ -1086,8 +1082,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			/* drop through to normal processing */
 		}
 
-		vacrelstats->scanned_pages++;
-		vacrelstats->tupcount_pages++;
+		vacrelstats.scanned_pages++;
+		vacrelstats.tupcount_pages++;
 
 		page = BufferGetPage(buf);
 
@@ -1184,7 +1180,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		 * We count tuples removed by the pruning step as removed by VACUUM.
 		 */
 		tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin, false,
-										 &vacrelstats->latestRemovedXid);
+										 &vacrelstats.latestRemovedXid);
 
 		/*
 		 * Now scan the page to collect vacuumable items and check for tuples
@@ -1381,7 +1377,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			{
 				lazy_record_dead_tuple(dead_tuples, &(tuple.t_self));
 				HeapTupleHeaderAdvanceLatestRemovedXid(tuple.t_data,
-													   &vacrelstats->latestRemovedXid);
+													   &vacrelstats.latestRemovedXid);
 				tups_vacuumed += 1;
 				has_dead_tuples = true;
 			}
@@ -1449,12 +1445,12 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 		 * doing a second scan. Also we don't do that but forget dead tuples
 		 * when index cleanup is disabled.
 		 */
-		if (!vacrelstats->useindex && dead_tuples->num_tuples > 0)
+		if (!vacrelstats.useindex && dead_tuples->num_tuples > 0)
 		{
 			if (nindexes == 0)
 			{
 				/* Remove tuples from heap if the table has no index */
-				lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats, &vmbuffer);
+				lazy_vacuum_page(onerel, blkno, buf, 0, &vmbuffer);
 				vacuumed_pages++;
 				has_dead_tuples = false;
 			}
@@ -1465,7 +1461,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 				 * Instead of vacuuming the dead tuples on the heap, we just
 				 * forget them.
 				 *
-				 * Note that vacrelstats->dead_tuples could have tuples which
+				 * Note that vacrelstats.dead_tuples could have tuples which
 				 * became dead after HOT-pruning but are not marked dead yet.
 				 * We do not process them because it's a very rare condition,
 				 * and the next vacuum will process them anyway.
@@ -1584,7 +1580,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 
 		/* Remember the location of the last page with nonremovable tuples */
 		if (hastup)
-			vacrelstats->nonempty_pages = blkno + 1;
+			vacrelstats.nonempty_pages = blkno + 1;
 
 		/*
 		 * If we remembered any tuples for deletion, then the page will be
@@ -1603,18 +1599,18 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	pfree(frozen);
 
 	/* save stats for use later */
-	vacrelstats->tuples_deleted = tups_vacuumed;
-	vacrelstats->new_dead_tuples = nkeep;
+	vacrelstats.tuples_deleted = tups_vacuumed;
+	vacrelstats.new_dead_tuples = nkeep;
 
 	/* now we can compute the new value for pg_class.reltuples */
-	vacrelstats->new_live_tuples = vac_estimate_reltuples(onerel,
+	vacrelstats.new_live_tuples = vac_estimate_reltuples(onerel,
 														  nblocks,
-														  vacrelstats->tupcount_pages,
+														  vacrelstats.tupcount_pages,
 														  live_tuples);
 
 	/* also compute total number of surviving heap entries */
-	vacrelstats->new_rel_tuples =
-		vacrelstats->new_live_tuples + vacrelstats->new_dead_tuples;
+	vacrelstats.new_rel_tuples =
+		vacrelstats.new_live_tuples + vacrelstats.new_dead_tuples;
 
 	/*
 	 * Release any remaining pin on visibility map page.
@@ -1630,11 +1626,11 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	if (dead_tuples->num_tuples > 0)
 	{
 		/* Work on all the indexes, and then the heap */
-		lazy_vacuum_all_indexes(onerel, Irel, indstats, vacrelstats,
+		lazy_vacuum_all_indexes(onerel, Irel, indstats,
 								lps, nindexes);
 
 		/* Remove tuples from heap */
-		lazy_vacuum_heap(onerel, vacrelstats);
+		lazy_vacuum_heap(onerel);
 	}
 
 	/*
@@ -1648,8 +1644,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 	pgstat_progress_update_param(PROGRESS_VACUUM_HEAP_BLKS_VACUUMED, blkno);
 
 	/* Do post-vacuum cleanup */
-	if (vacrelstats->useindex)
-		lazy_cleanup_all_indexes(Irel, indstats, vacrelstats, lps, nindexes);
+	if (vacrelstats.useindex)
+		lazy_cleanup_all_indexes(Irel, indstats, lps, nindexes);
 
 	/*
 	 * End parallel mode before updating index statistics as we cannot write
@@ -1680,12 +1676,12 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 					 nunused);
 	appendStringInfo(&buf, ngettext("Skipped %u page due to buffer pins, ",
 									"Skipped %u pages due to buffer pins, ",
-									vacrelstats->pinskipped_pages),
-					 vacrelstats->pinskipped_pages);
+									vacrelstats.pinskipped_pages),
+					 vacrelstats.pinskipped_pages);
 	appendStringInfo(&buf, ngettext("%u frozen page.\n",
 									"%u frozen pages.\n",
-									vacrelstats->frozenskipped_pages),
-					 vacrelstats->frozenskipped_pages);
+									vacrelstats.frozenskipped_pages),
+					 vacrelstats.frozenskipped_pages);
 	appendStringInfo(&buf, ngettext("%u page is entirely empty.\n",
 									"%u pages are entirely empty.\n",
 									empty_pages),
@@ -1696,7 +1692,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 			(errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
 					RelationGetRelationName(onerel),
 					tups_vacuumed, num_tuples,
-					vacrelstats->scanned_pages, nblocks),
+					vacrelstats.scanned_pages, nblocks),
 			 errdetail_internal("%s", buf.data)));
 	pfree(buf.data);
 }
@@ -1709,14 +1705,14 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
 static void
 lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
 						IndexBulkDeleteResult **stats,
-						LVRelStats *vacrelstats, LVParallelState *lps,
+						LVParallelState *lps,
 						int nindexes)
 {
 	Assert(!IsParallelWorker());
 	Assert(nindexes > 0);
 
 	/* Log cleanup info before we touch indexes */
-	vacuum_log_cleanup_info(onerel, vacrelstats);
+	vacuum_log_cleanup_info(onerel);
 
 	/* Report that we are now vacuuming indexes */
 	pgstat_progress_update_param(PROGRESS_VACUUM_PHASE,
@@ -1733,24 +1729,24 @@ lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
 		 * We can only provide an approximate value of num_heap_tuples in
 		 * vacuum cases.
 		 */
-		lps->lvshared->reltuples = vacrelstats->old_live_tuples;
+		lps->lvshared->reltuples = vacrelstats.old_live_tuples;
 		lps->lvshared->estimated_count = true;
 
-		lazy_parallel_vacuum_indexes(Irel, stats, vacrelstats, lps, nindexes);
+		lazy_parallel_vacuum_indexes(Irel, stats, lps, nindexes);
 	}
 	else
 	{
 		int			idx;
 
 		for (idx = 0; idx < nindexes; idx++)
-			lazy_vacuum_index(Irel[idx], &stats[idx], vacrelstats->dead_tuples,
-							  vacrelstats->old_live_tuples);
+			lazy_vacuum_index(Irel[idx], &stats[idx], vacrelstats.dead_tuples,
+							  vacrelstats.old_live_tuples);
 	}
 
 	/* Increase and report the number of index scans */
-	vacrelstats->num_index_scans++;
+	vacrelstats.num_index_scans++;
 	pgstat_progress_update_param(PROGRESS_VACUUM_NUM_INDEX_VACUUMS,
-								 vacrelstats->num_index_scans);
+								 vacrelstats.num_index_scans);
 }
 
 
@@ -1766,7 +1762,7 @@ lazy_vacuum_all_indexes(Relation onerel, Relation *Irel,
  * process index entry removal in batches as large as possible.
  */
 static void
-lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
+lazy_vacuum_heap(Relation onerel)
 {
 	int			tupindex;
 	int			npages;
@@ -1781,7 +1777,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 	npages = 0;
 
 	tupindex = 0;
-	while (tupindex < vacrelstats->dead_tuples->num_tuples)
+	while (tupindex < vacrelstats.dead_tuples->num_tuples)
 	{
 		BlockNumber tblk;
 		Buffer		buf;
@@ -1790,7 +1786,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 
 		vacuum_delay_point();
 
-		tblk = ItemPointerGetBlockNumber(&vacrelstats->dead_tuples->itemptrs[tupindex]);
+		tblk = ItemPointerGetBlockNumber(&vacrelstats.dead_tuples->itemptrs[tupindex]);
 		buf = ReadBufferExtended(onerel, MAIN_FORKNUM, tblk, RBM_NORMAL,
 								 vac_strategy);
 		if (!ConditionalLockBufferForCleanup(buf))
@@ -1799,7 +1795,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
 			++tupindex;
 			continue;
 		}
-		tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex, vacrelstats,
+		tupindex = lazy_vacuum_page(onerel, tblk, buf, tupindex,
 									&vmbuffer);
 
 		/* Now that we've compacted the page, record its available space */
@@ -1836,9 +1832,9 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
  */
 static int
 lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
-				 int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer)
+				 int tupindex, Buffer *vmbuffer)
 {
-	LVDeadTuples *dead_tuples = vacrelstats->dead_tuples;
+	LVDeadTuples *dead_tuples = vacrelstats.dead_tuples;
 	Page		page = BufferGetPage(buffer);
 	OffsetNumber unused[MaxOffsetNumber];
 	int			uncnt = 0;
@@ -1879,7 +1875,7 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
 		recptr = log_heap_clean(onerel, buffer,
 								NULL, 0, NULL, 0,
 								unused, uncnt,
-								vacrelstats->latestRemovedXid);
+								vacrelstats.latestRemovedXid);
 		PageSetLSN(page, recptr);
 	}
 
@@ -1987,7 +1983,7 @@ lazy_check_needs_freeze(Buffer buf, bool *hastup)
  */
 static void
 lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
-							 LVRelStats *vacrelstats, LVParallelState *lps,
+							 LVParallelState *lps,
 							 int nindexes)
 {
 	int			nworkers;
@@ -2021,7 +2017,7 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 	/* Setup the shared cost-based vacuum delay and launch workers */
 	if (nworkers > 0)
 	{
-		if (vacrelstats->num_index_scans > 0)
+		if (vacrelstats.num_index_scans > 0)
 		{
 			/* Reset the parallel index processing counter */
 			pg_atomic_write_u32(&(lps->lvshared->idx), 0);
@@ -2076,14 +2072,14 @@ lazy_parallel_vacuum_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 	}
 
 	/* Process the indexes that can be processed by only leader process */
-	vacuum_indexes_leader(Irel, stats, vacrelstats, lps, nindexes);
+	vacuum_indexes_leader(Irel, stats, lps, nindexes);
 
 	/*
 	 * Join as a parallel worker.  The leader process alone processes all the
 	 * indexes in the case where no workers are launched.
 	 */
 	parallel_vacuum_index(Irel, stats, lps->lvshared,
-						  vacrelstats->dead_tuples, nindexes);
+						  vacrelstats.dead_tuples, nindexes);
 
 	/* Wait for all vacuum workers to finish */
 	WaitForParallelWorkersToFinish(lps->pcxt);
@@ -2157,7 +2153,7 @@ parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
  */
 static void
 vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
-					  LVRelStats *vacrelstats, LVParallelState *lps,
+					  LVParallelState *lps,
 					  int nindexes)
 {
 	int			i;
@@ -2180,7 +2176,7 @@ vacuum_indexes_leader(Relation *Irel, IndexBulkDeleteResult **stats,
 		if (shared_indstats == NULL ||
 			skip_parallel_vacuum_index(Irel[i], lps->lvshared))
 			vacuum_one_index(Irel[i], &(stats[i]), lps->lvshared,
-							 shared_indstats, vacrelstats->dead_tuples);
+							 shared_indstats, vacrelstats.dead_tuples);
 	}
 
 	/*
@@ -2259,7 +2255,7 @@ vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
  */
 static void
 lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
-						 LVRelStats *vacrelstats, LVParallelState *lps,
+						 LVParallelState *lps,
 						 int nindexes)
 {
 	int			idx;
@@ -2280,25 +2276,25 @@ lazy_cleanup_all_indexes(Relation *Irel, IndexBulkDeleteResult **stats,
 		/* Tell parallel workers to do index cleanup */
 		lps->lvshared->for_cleanup = true;
 		lps->lvshared->first_time =
-			(vacrelstats->num_index_scans == 0);
+			(vacrelstats.num_index_scans == 0);
 
 		/*
 		 * Now we can provide a better estimate of total number of surviving
 		 * tuples (we assume indexes are more interested in that than in the
 		 * number of nominally live tuples).
 		 */
-		lps->lvshared->reltuples = vacrelstats->new_rel_tuples;
+		lps->lvshared->reltuples = vacrelstats.new_rel_tuples;
 		lps->lvshared->estimated_count =
-			(vacrelstats->tupcount_pages < vacrelstats->rel_pages);
+			(vacrelstats.tupcount_pages < vacrelstats.rel_pages);
 
-		lazy_parallel_vacuum_indexes(Irel, stats, vacrelstats, lps, nindexes);
+		lazy_parallel_vacuum_indexes(Irel, stats, lps, nindexes);
 	}
 	else
 	{
 		for (idx = 0; idx < nindexes; idx++)
 			lazy_cleanup_index(Irel[idx], &stats[idx],
-							   vacrelstats->new_rel_tuples,
-							   vacrelstats->tupcount_pages < vacrelstats->rel_pages);
+							   vacrelstats.new_rel_tuples,
+							   vacrelstats.tupcount_pages < vacrelstats.rel_pages);
 	}
 }
 
@@ -2414,17 +2410,17 @@ lazy_cleanup_index(Relation indrel,
  * careful to depend only on fields that lazy_scan_heap updates on-the-fly.
  */
 static bool
-should_attempt_truncation(VacuumParams *params, LVRelStats *vacrelstats)
+should_attempt_truncation(VacuumParams *params)
 {
 	BlockNumber possibly_freeable;
 
 	if (params->truncate == VACOPT_TERNARY_DISABLED)
 		return false;
 
-	possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
+	possibly_freeable = vacrelstats.rel_pages - vacrelstats.nonempty_pages;
 	if (possibly_freeable > 0 &&
 		(possibly_freeable >= REL_TRUNCATE_MINIMUM ||
-		 possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) &&
+		 possibly_freeable >= vacrelstats.rel_pages / REL_TRUNCATE_FRACTION) &&
 		old_snapshot_threshold < 0)
 		return true;
 	else
@@ -2435,9 +2431,9 @@ should_attempt_truncation(VacuumParams *params, LVRelStats *vacrelstats)
  * lazy_truncate_heap - try to truncate off any empty pages at the end
  */
 static void
-lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
+lazy_truncate_heap(Relation onerel)
 {
-	BlockNumber old_rel_pages = vacrelstats->rel_pages;
+	BlockNumber old_rel_pages = vacrelstats.rel_pages;
 	BlockNumber new_rel_pages;
 	PGRUsage	ru0;
 	int			lock_retry;
@@ -2460,7 +2456,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 		 * (which is quite possible considering we already hold a lower-grade
 		 * lock).
 		 */
-		vacrelstats->lock_waiter_detected = false;
+		vacrelstats.lock_waiter_detected = false;
 		lock_retry = 0;
 		while (true)
 		{
@@ -2480,7 +2476,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 				 * We failed to establish the lock in the specified number of
 				 * retries. This means we give up truncating.
 				 */
-				vacrelstats->lock_waiter_detected = true;
+				vacrelstats.lock_waiter_detected = true;
 				ereport(elevel,
 						(errmsg("\"%s\": stopping truncate due to conflicting lock request",
 								RelationGetRelationName(onerel))));
@@ -2499,7 +2495,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 		if (new_rel_pages != old_rel_pages)
 		{
 			/*
-			 * Note: we intentionally don't update vacrelstats->rel_pages with
+			 * Note: we intentionally don't update vacrelstats.rel_pages with
 			 * the new rel size here.  If we did, it would amount to assuming
 			 * that the new pages are empty, which is unlikely. Leaving the
 			 * numbers alone amounts to assuming that the new pages have the
@@ -2515,7 +2511,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 		 * other backends could have added tuples to these pages whilst we
 		 * were vacuuming.
 		 */
-		new_rel_pages = count_nondeletable_pages(onerel, vacrelstats);
+		new_rel_pages = count_nondeletable_pages(onerel);
 
 		if (new_rel_pages >= old_rel_pages)
 		{
@@ -2543,8 +2539,8 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 		 * without also touching reltuples, since the tuple count wasn't
 		 * changed by the truncation.
 		 */
-		vacrelstats->pages_removed += old_rel_pages - new_rel_pages;
-		vacrelstats->rel_pages = new_rel_pages;
+		vacrelstats.pages_removed += old_rel_pages - new_rel_pages;
+		vacrelstats.rel_pages = new_rel_pages;
 
 		ereport(elevel,
 				(errmsg("\"%s\": truncated %u to %u pages",
@@ -2553,8 +2549,8 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
 				 errdetail_internal("%s",
 									pg_rusage_show(&ru0))));
 		old_rel_pages = new_rel_pages;
-	} while (new_rel_pages > vacrelstats->nonempty_pages &&
-			 vacrelstats->lock_waiter_detected);
+	} while (new_rel_pages > vacrelstats.nonempty_pages &&
+			 vacrelstats.lock_waiter_detected);
 }
 
 /*
@@ -2563,7 +2559,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
  * Returns number of nondeletable pages (last nonempty page + 1).
  */
 static BlockNumber
-count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
+count_nondeletable_pages(Relation onerel)
 {
 	BlockNumber blkno;
 	BlockNumber prefetchedUntil;
@@ -2578,11 +2574,11 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 	 * unsigned.)  To make the scan faster, we prefetch a few blocks at a time
 	 * in forward direction, so that OS-level readahead can kick in.
 	 */
-	blkno = vacrelstats->rel_pages;
+	blkno = vacrelstats.rel_pages;
 	StaticAssertStmt((PREFETCH_SIZE & (PREFETCH_SIZE - 1)) == 0,
 					 "prefetch size must be power of 2");
 	prefetchedUntil = InvalidBlockNumber;
-	while (blkno > vacrelstats->nonempty_pages)
+	while (blkno > vacrelstats.nonempty_pages)
 	{
 		Buffer		buf;
 		Page		page;
@@ -2615,7 +2611,7 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 							(errmsg("\"%s\": suspending truncate due to conflicting lock request",
 									RelationGetRelationName(onerel))));
 
-					vacrelstats->lock_waiter_detected = true;
+					vacrelstats.lock_waiter_detected = true;
 					return blkno;
 				}
 				starttime = currenttime;
@@ -2695,7 +2691,7 @@ count_nondeletable_pages(Relation onerel, LVRelStats *vacrelstats)
 	 * pages still are; we need not bother to look at the last known-nonempty
 	 * page.
 	 */
-	return vacrelstats->nonempty_pages;
+	return vacrelstats.nonempty_pages;
 }
 
 /*
@@ -2734,18 +2730,18 @@ compute_max_dead_tuples(BlockNumber relblocks, bool useindex)
  * See the comments at the head of this file for rationale.
  */
 static void
-lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks)
+lazy_space_alloc(BlockNumber relblocks)
 {
 	LVDeadTuples *dead_tuples = NULL;
 	long		maxtuples;
 
-	maxtuples = compute_max_dead_tuples(relblocks, vacrelstats->useindex);
+	maxtuples = compute_max_dead_tuples(relblocks, vacrelstats.useindex);
 
 	dead_tuples = (LVDeadTuples *) palloc(SizeOfDeadTuples(maxtuples));
 	dead_tuples->num_tuples = 0;
 	dead_tuples->max_tuples = (int) maxtuples;
 
-	vacrelstats->dead_tuples = dead_tuples;
+	vacrelstats.dead_tuples = dead_tuples;
 }
 
 /*
@@ -3063,7 +3059,7 @@ update_index_statistics(Relation *Irel, IndexBulkDeleteResult **stats,
  * create a parallel context, and then initialize the DSM segment.
  */
 static LVParallelState *
-begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
+begin_parallel_vacuum(Oid relid, Relation *Irel,
 					  BlockNumber nblocks, int nindexes, int nrequested)
 {
 	LVParallelState *lps = NULL;
@@ -3185,7 +3181,7 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
 	dead_tuples->num_tuples = 0;
 	MemSet(dead_tuples->itemptrs, 0, sizeof(ItemPointerData) * maxtuples);
 	shm_toc_insert(pcxt->toc, PARALLEL_VACUUM_KEY_DEAD_TUPLES, dead_tuples);
-	vacrelstats->dead_tuples = dead_tuples;
+	vacrelstats.dead_tuples = dead_tuples;
 
 	/* Store query string for workers */
 	sharedquery = (char *) shm_toc_allocate(pcxt->toc, querylen + 1);
-- 
2.7.4

