From fa726baf070a5e5708dfbe36757e1c4f66ecee8a Mon Sep 17 00:00:00 2001
From: Heikki Linnakangas <heikki.linnakangas@iki.fi>
Date: Thu, 18 Dec 2025 23:54:17 +0200
Subject: [PATCH 8/9] WIP: Don't serialize the snapshot for parallel scans

When performing a parallel table scan or index scan, we serialized the
snapshot to use and passed it to the parallel worker. However in
practice, in the executor we always pass the executor's snapshot,
i.e. EState->es_snapshot, which is also always the active
snapshot. The only other place where we use parallel scans is parallel
index builds, and they always pass either SnapshotAny or the current
transaction snapshot. We're not really relying on the capability to
use an arbitrary snapshot.

Stop serializing the snapshot as part of the parallel scan
desc. Instead, require the caller to pass it as an argument to
table_beginscan_parallel() in the worker processes. That moves the
responsibility for serializing or otherwise getting the snapshot in
each worker process to the calling code. That's not a problem for the
executor, because the active snapshot is already serialized elsewhere,
and for parallel index scans, we also already have access to the
transaction snapshot. This simplifies the code a little, and
eliminates the overhead of serializing the snapshot.

The reason that this is part of this patch set is that the previous
commit made RestoreSnapshot() slower when there are already snapshots
registered (O(n)). This removes the RestoreSnapshot() calls that
could've become slow, although it'd probably be fine in practice.
---
 src/backend/access/brin/brin.c           | 23 +++++----
 src/backend/access/gin/gininsert.c       | 23 +++++----
 src/backend/access/index/indexam.c       | 16 ++-----
 src/backend/access/nbtree/nbtsort.c      | 23 +++++----
 src/backend/access/table/tableam.c       | 61 ++++--------------------
 src/backend/executor/nodeIndexonlyscan.c |  3 ++
 src/backend/executor/nodeIndexscan.c     |  3 ++
 src/backend/executor/nodeSeqscan.c       | 15 +++---
 src/backend/executor/nodeTidrangescan.c  | 13 ++---
 src/include/access/genam.h               |  1 +
 src/include/access/relscan.h             |  3 --
 src/include/access/tableam.h             | 11 +++--
 12 files changed, 84 insertions(+), 111 deletions(-)

diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index 45d306037a4..d3b5b7e47a8 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -231,7 +231,7 @@ static void brin_fill_empty_ranges(BrinBuildState *state,
 static void _brin_begin_parallel(BrinBuildState *buildstate, Relation heap, Relation index,
 								 bool isconcurrent, int request);
 static void _brin_end_parallel(BrinLeader *brinleader, BrinBuildState *state);
-static Size _brin_parallel_estimate_shared(Relation heap, Snapshot snapshot);
+static Size _brin_parallel_estimate_shared(Relation heap);
 static double _brin_parallel_heapscan(BrinBuildState *state);
 static double _brin_parallel_merge(BrinBuildState *state);
 static void _brin_leader_participate_as_worker(BrinBuildState *buildstate,
@@ -2420,7 +2420,7 @@ _brin_begin_parallel(BrinBuildState *buildstate, Relation heap, Relation index,
 	/*
 	 * Estimate size for our own PARALLEL_KEY_BRIN_SHARED workspace.
 	 */
-	estbrinshared = _brin_parallel_estimate_shared(heap, snapshot);
+	estbrinshared = _brin_parallel_estimate_shared(heap);
 	shm_toc_estimate_chunk(&pcxt->estimator, estbrinshared);
 	estsort = tuplesort_estimate_shared(scantuplesortstates);
 	shm_toc_estimate_chunk(&pcxt->estimator, estsort);
@@ -2483,8 +2483,7 @@ _brin_begin_parallel(BrinBuildState *buildstate, Relation heap, Relation index,
 	brinshared->indtuples = 0.0;
 
 	table_parallelscan_initialize(heap,
-								  ParallelTableScanFromBrinShared(brinshared),
-								  snapshot);
+								  ParallelTableScanFromBrinShared(brinshared));
 
 	/*
 	 * Store shared tuplesort-private state, for which we reserved space.
@@ -2775,14 +2774,14 @@ _brin_parallel_merge(BrinBuildState *state)
 
 /*
  * Returns size of shared memory required to store state for a parallel
- * brin index build based on the snapshot its parallel scan will use.
+ * brin index build.
  */
 static Size
-_brin_parallel_estimate_shared(Relation heap, Snapshot snapshot)
+_brin_parallel_estimate_shared(Relation heap)
 {
 	/* c.f. shm_toc_allocate as to why BUFFERALIGN is used */
 	return add_size(BUFFERALIGN(sizeof(BrinShared)),
-					table_parallelscan_estimate(heap, snapshot));
+					table_parallelscan_estimate(heap));
 }
 
 /*
@@ -2823,6 +2822,7 @@ _brin_parallel_scan_and_build(BrinBuildState *state,
 							  int sortmem, bool progress)
 {
 	SortCoordinate coordinate;
+	Snapshot	snapshot;
 	TableScanDesc scan;
 	double		reltuples;
 	IndexInfo  *indexInfo;
@@ -2837,12 +2837,19 @@ _brin_parallel_scan_and_build(BrinBuildState *state,
 	state->bs_sortstate = tuplesort_begin_index_brin(sortmem, coordinate,
 													 TUPLESORT_NONE);
 
+	/* Use the right snapshot, per the same logic as in _brin_begin_parallel() */
+	if (!brinshared->isconcurrent)
+		snapshot = SnapshotAny;
+	else
+		snapshot = RegisterSnapshot(GetTransactionSnapshot());
+
 	/* Join parallel scan */
 	indexInfo = BuildIndexInfo(index);
 	indexInfo->ii_Concurrent = brinshared->isconcurrent;
 
 	scan = table_beginscan_parallel(heap,
-									ParallelTableScanFromBrinShared(brinshared));
+									ParallelTableScanFromBrinShared(brinshared),
+									snapshot);
 
 	reltuples = table_index_build_scan(heap, index, indexInfo, true, true,
 									   brinbuildCallbackParallel, state, scan);
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index df30dcc0228..ae71bcb716c 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -181,7 +181,7 @@ typedef struct
 static void _gin_begin_parallel(GinBuildState *buildstate, Relation heap, Relation index,
 								bool isconcurrent, int request);
 static void _gin_end_parallel(GinLeader *ginleader, GinBuildState *state);
-static Size _gin_parallel_estimate_shared(Relation heap, Snapshot snapshot);
+static Size _gin_parallel_estimate_shared(Relation heap);
 static double _gin_parallel_heapscan(GinBuildState *state);
 static double _gin_parallel_merge(GinBuildState *state);
 static void _gin_leader_participate_as_worker(GinBuildState *buildstate,
@@ -969,7 +969,7 @@ _gin_begin_parallel(GinBuildState *buildstate, Relation heap, Relation index,
 	/*
 	 * Estimate size for our own PARALLEL_KEY_GIN_SHARED workspace.
 	 */
-	estginshared = _gin_parallel_estimate_shared(heap, snapshot);
+	estginshared = _gin_parallel_estimate_shared(heap);
 	shm_toc_estimate_chunk(&pcxt->estimator, estginshared);
 	estsort = tuplesort_estimate_shared(scantuplesortstates);
 	shm_toc_estimate_chunk(&pcxt->estimator, estsort);
@@ -1031,8 +1031,7 @@ _gin_begin_parallel(GinBuildState *buildstate, Relation heap, Relation index,
 	ginshared->indtuples = 0.0;
 
 	table_parallelscan_initialize(heap,
-								  ParallelTableScanFromGinBuildShared(ginshared),
-								  snapshot);
+								  ParallelTableScanFromGinBuildShared(ginshared));
 
 	/*
 	 * Store shared tuplesort-private state, for which we reserved space.
@@ -1794,14 +1793,14 @@ _gin_parallel_merge(GinBuildState *state)
 
 /*
  * Returns size of shared memory required to store state for a parallel
- * gin index build based on the snapshot its parallel scan will use.
+ * gin index build.
  */
 static Size
-_gin_parallel_estimate_shared(Relation heap, Snapshot snapshot)
+_gin_parallel_estimate_shared(Relation heap)
 {
 	/* c.f. shm_toc_allocate as to why BUFFERALIGN is used */
 	return add_size(BUFFERALIGN(sizeof(GinBuildShared)),
-					table_parallelscan_estimate(heap, snapshot));
+					table_parallelscan_estimate(heap));
 }
 
 /*
@@ -2025,6 +2024,7 @@ _gin_parallel_scan_and_build(GinBuildState *state,
 							 int sortmem, bool progress)
 {
 	SortCoordinate coordinate;
+	Snapshot	snapshot;
 	TableScanDesc scan;
 	double		reltuples;
 	IndexInfo  *indexInfo;
@@ -2057,8 +2057,15 @@ _gin_parallel_scan_and_build(GinBuildState *state,
 	indexInfo = BuildIndexInfo(index);
 	indexInfo->ii_Concurrent = ginshared->isconcurrent;
 
+	/* Use the right snapshot, per the same logic as in _gin_begin_parallel() */
+	if (!ginshared->isconcurrent)
+		snapshot = SnapshotAny;
+	else
+		snapshot = RegisterSnapshot(GetTransactionSnapshot());
+
 	scan = table_beginscan_parallel(heap,
-									ParallelTableScanFromGinBuildShared(ginshared));
+									ParallelTableScanFromGinBuildShared(ginshared),
+									snapshot);
 
 	reltuples = table_index_build_scan(heap, index, indexInfo, true, progress,
 									   ginBuildCallbackParallel, state, scan);
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 3828622665c..9d032814e45 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -303,8 +303,6 @@ index_beginscan_bitmap(Relation indexRelation,
 {
 	IndexScanDesc scan;
 
-	Assert(snapshot != InvalidSnapshot);
-
 	scan = index_beginscan_internal(indexRelation, nkeys, 0, snapshot, NULL, false);
 
 	/*
@@ -478,8 +476,7 @@ index_parallelscan_estimate(Relation indexRelation, int nkeys, int norderbys,
 
 	RELATION_CHECKS;
 
-	nbytes = offsetof(ParallelIndexScanDescData, ps_snapshot_data);
-	nbytes = add_size(nbytes, EstimateSnapshotSpace(&snapshot->mvcc));
+	nbytes = sizeof(ParallelIndexScanDescData);
 	nbytes = MAXALIGN(nbytes);
 
 	if (instrument)
@@ -528,17 +525,14 @@ index_parallelscan_initialize(Relation heapRelation, Relation indexRelation,
 	Assert(instrument || parallel_aware);
 
 	RELATION_CHECKS;
-	Assert(snapshot->snapshot_type == SNAPSHOT_MVCC);
 
-	offset = add_size(offsetof(ParallelIndexScanDescData, ps_snapshot_data),
-					  EstimateSnapshotSpace((MVCCSnapshot) snapshot));
+	offset = sizeof(ParallelIndexScanDescData);
 	offset = MAXALIGN(offset);
 
 	target->ps_locator = heapRelation->rd_locator;
 	target->ps_indexlocator = indexRelation->rd_locator;
 	target->ps_offset_ins = 0;
 	target->ps_offset_am = 0;
-	SerializeSnapshot((MVCCSnapshot) snapshot, target->ps_snapshot_data);
 
 	if (instrument)
 	{
@@ -592,19 +586,19 @@ index_parallelrescan(IndexScanDesc scan)
  */
 IndexScanDesc
 index_beginscan_parallel(Relation heaprel, Relation indexrel,
+						 Snapshot snapshot,
 						 IndexScanInstrumentation *instrument,
 						 int nkeys, int norderbys,
 						 ParallelIndexScanDesc pscan)
 {
-	Snapshot	snapshot;
 	IndexScanDesc scan;
 
+	Assert(snapshot != InvalidSnapshot);
 	Assert(RelFileLocatorEquals(heaprel->rd_locator, pscan->ps_locator));
 	Assert(RelFileLocatorEquals(indexrel->rd_locator, pscan->ps_indexlocator));
 
-	snapshot = (Snapshot) RestoreSnapshot(pscan->ps_snapshot_data);
 	scan = index_beginscan_internal(indexrel, nkeys, norderbys, snapshot,
-									pscan, true);
+									pscan, false);
 
 	/*
 	 * Save additional parameters into the scandesc.  Everything else was set
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index d7695dc1108..b60f4807bde 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -279,7 +279,7 @@ static void _bt_load(BTWriteState *wstate,
 static void _bt_begin_parallel(BTBuildState *buildstate, bool isconcurrent,
 							   int request);
 static void _bt_end_parallel(BTLeader *btleader);
-static Size _bt_parallel_estimate_shared(Relation heap, Snapshot snapshot);
+static Size _bt_parallel_estimate_shared(Relation heap);
 static double _bt_parallel_heapscan(BTBuildState *buildstate,
 									bool *brokenhotchain);
 static void _bt_leader_participate_as_worker(BTBuildState *buildstate);
@@ -1441,7 +1441,7 @@ _bt_begin_parallel(BTBuildState *buildstate, bool isconcurrent, int request)
 	 * Estimate size for our own PARALLEL_KEY_BTREE_SHARED workspace, and
 	 * PARALLEL_KEY_TUPLESORT tuplesort workspace
 	 */
-	estbtshared = _bt_parallel_estimate_shared(btspool->heap, snapshot);
+	estbtshared = _bt_parallel_estimate_shared(btspool->heap);
 	shm_toc_estimate_chunk(&pcxt->estimator, estbtshared);
 	estsort = tuplesort_estimate_shared(scantuplesortstates);
 	shm_toc_estimate_chunk(&pcxt->estimator, estsort);
@@ -1515,8 +1515,7 @@ _bt_begin_parallel(BTBuildState *buildstate, bool isconcurrent, int request)
 	btshared->indtuples = 0.0;
 	btshared->brokenhotchain = false;
 	table_parallelscan_initialize(btspool->heap,
-								  ParallelTableScanFromBTShared(btshared),
-								  snapshot);
+								  ParallelTableScanFromBTShared(btshared));
 
 	/*
 	 * Store shared tuplesort-private state, for which we reserved space.
@@ -1628,14 +1627,14 @@ _bt_end_parallel(BTLeader *btleader)
 
 /*
  * Returns size of shared memory required to store state for a parallel
- * btree index build based on the snapshot its parallel scan will use.
+ * btree index build.
  */
 static Size
-_bt_parallel_estimate_shared(Relation heap, Snapshot snapshot)
+_bt_parallel_estimate_shared(Relation heap)
 {
 	/* c.f. shm_toc_allocate as to why BUFFERALIGN is used */
 	return add_size(BUFFERALIGN(sizeof(BTShared)),
-					table_parallelscan_estimate(heap, snapshot));
+					table_parallelscan_estimate(heap));
 }
 
 /*
@@ -1869,6 +1868,7 @@ _bt_parallel_scan_and_sort(BTSpool *btspool, BTSpool *btspool2,
 {
 	SortCoordinate coordinate;
 	BTBuildState buildstate;
+	Snapshot	snapshot;
 	TableScanDesc scan;
 	double		reltuples;
 	IndexInfo  *indexInfo;
@@ -1921,11 +1921,18 @@ _bt_parallel_scan_and_sort(BTSpool *btspool, BTSpool *btspool2,
 	buildstate.indtuples = 0;
 	buildstate.btleader = NULL;
 
+	/* Use the right snapshot, per the same logic as in _bt_begin_parallel() */
+	if (!btshared->isconcurrent)
+		snapshot = SnapshotAny;
+	else
+		snapshot = RegisterSnapshot(GetTransactionSnapshot());
+
 	/* Join parallel scan */
 	indexInfo = BuildIndexInfo(btspool->index);
 	indexInfo->ii_Concurrent = btshared->isconcurrent;
 	scan = table_beginscan_parallel(btspool->heap,
-									ParallelTableScanFromBTShared(btshared));
+									ParallelTableScanFromBTShared(btshared),
+									snapshot);
 	reltuples = table_index_build_scan(btspool->heap, btspool->index, indexInfo,
 									   true, progress, _bt_build_callback,
 									   &buildstate, scan);
diff --git a/src/backend/access/table/tableam.c b/src/backend/access/table/tableam.c
index c8db2918f40..61f5ae13474 100644
--- a/src/backend/access/table/tableam.c
+++ b/src/backend/access/table/tableam.c
@@ -128,70 +128,39 @@ table_beginscan_catalog(Relation relation, int nkeys, ScanKeyData *key)
  */
 
 Size
-table_parallelscan_estimate(Relation rel, Snapshot snapshot)
+table_parallelscan_estimate(Relation rel)
 {
-	Size		sz = 0;
+	Size		sz;
 
-	if (IsMVCCSnapshot(snapshot))
-		sz = add_size(sz, EstimateSnapshotSpace((MVCCSnapshot) snapshot));
-	else
-		Assert(snapshot == SnapshotAny);
-
-	sz = add_size(sz, rel->rd_tableam->parallelscan_estimate(rel));
+	sz = rel->rd_tableam->parallelscan_estimate(rel);
 
 	return sz;
 }
 
 void
-table_parallelscan_initialize(Relation rel, ParallelTableScanDesc pscan,
-							  Snapshot snapshot)
+table_parallelscan_initialize(Relation rel, ParallelTableScanDesc pscan)
 {
-	Size		snapshot_off = rel->rd_tableam->parallelscan_initialize(rel, pscan);
-
-	pscan->phs_snapshot_off = snapshot_off;
-
-	if (IsMVCCSnapshot(snapshot))
-	{
-		SerializeSnapshot((MVCCSnapshot) snapshot, (char *) pscan + pscan->phs_snapshot_off);
-		pscan->phs_snapshot_any = false;
-	}
-	else
-	{
-		Assert(snapshot == SnapshotAny);
-		pscan->phs_snapshot_any = true;
-	}
+	(void) rel->rd_tableam->parallelscan_initialize(rel, pscan);
 }
 
 TableScanDesc
-table_beginscan_parallel(Relation relation, ParallelTableScanDesc pscan)
+table_beginscan_parallel(Relation relation, ParallelTableScanDesc pscan,
+						 Snapshot snapshot)
 {
-	Snapshot	snapshot;
 	uint32		flags = SO_TYPE_SEQSCAN |
 		SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
 
 	Assert(RelFileLocatorEquals(relation->rd_locator, pscan->phs_locator));
 
-	if (!pscan->phs_snapshot_any)
-	{
-		/* Snapshot was serialized -- restore it */
-		snapshot = (Snapshot) RestoreSnapshot((char *) pscan + pscan->phs_snapshot_off);
-		flags |= SO_TEMP_SNAPSHOT;
-	}
-	else
-	{
-		/* SnapshotAny passed by caller (not serialized) */
-		snapshot = SnapshotAny;
-	}
-
 	return relation->rd_tableam->scan_begin(relation, snapshot, 0, NULL,
 											pscan, flags);
 }
 
 TableScanDesc
 table_beginscan_parallel_tidrange(Relation relation,
-								  ParallelTableScanDesc pscan)
+								  ParallelTableScanDesc pscan,
+								  Snapshot snapshot)
 {
-	Snapshot	snapshot;
 	uint32		flags = SO_TYPE_TIDRANGESCAN | SO_ALLOW_PAGEMODE;
 	TableScanDesc sscan;
 
@@ -200,18 +169,6 @@ table_beginscan_parallel_tidrange(Relation relation,
 	/* disable syncscan in parallel tid range scan. */
 	pscan->phs_syncscan = false;
 
-	if (!pscan->phs_snapshot_any)
-	{
-		/* Snapshot was serialized -- restore it */
-		snapshot = (Snapshot) RestoreSnapshot((char *) pscan + pscan->phs_snapshot_off);
-		flags |= SO_TEMP_SNAPSHOT;
-	}
-	else
-	{
-		/* SnapshotAny passed by caller (not serialized) */
-		snapshot = SnapshotAny;
-	}
-
 	sscan = relation->rd_tableam->scan_begin(relation, snapshot, 0, NULL,
 											 pscan, flags);
 	return sscan;
diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c
index 6bea42f128f..de4c1dd692c 100644
--- a/src/backend/executor/nodeIndexonlyscan.c
+++ b/src/backend/executor/nodeIndexonlyscan.c
@@ -785,6 +785,7 @@ ExecIndexOnlyScanInitializeDSM(IndexOnlyScanState *node,
 	node->ioss_ScanDesc =
 		index_beginscan_parallel(node->ss.ss_currentRelation,
 								 node->ioss_RelationDesc,
+								 estate->es_snapshot,
 								 &node->ioss_Instrument,
 								 node->ioss_NumScanKeys,
 								 node->ioss_NumOrderByKeys,
@@ -826,6 +827,7 @@ void
 ExecIndexOnlyScanInitializeWorker(IndexOnlyScanState *node,
 								  ParallelWorkerContext *pwcxt)
 {
+	EState	   *estate = node->ss.ps.state;
 	ParallelIndexScanDesc piscan;
 	bool		instrument = node->ss.ps.instrument != NULL;
 	bool		parallel_aware = node->ss.ps.plan->parallel_aware;
@@ -851,6 +853,7 @@ ExecIndexOnlyScanInitializeWorker(IndexOnlyScanState *node,
 	node->ioss_ScanDesc =
 		index_beginscan_parallel(node->ss.ss_currentRelation,
 								 node->ioss_RelationDesc,
+								 estate->es_snapshot,
 								 &node->ioss_Instrument,
 								 node->ioss_NumScanKeys,
 								 node->ioss_NumOrderByKeys,
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 72b135e5dcf..dff1aac452b 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -1720,6 +1720,7 @@ ExecIndexScanInitializeDSM(IndexScanState *node,
 	node->iss_ScanDesc =
 		index_beginscan_parallel(node->ss.ss_currentRelation,
 								 node->iss_RelationDesc,
+								 estate->es_snapshot,
 								 &node->iss_Instrument,
 								 node->iss_NumScanKeys,
 								 node->iss_NumOrderByKeys,
@@ -1759,6 +1760,7 @@ void
 ExecIndexScanInitializeWorker(IndexScanState *node,
 							  ParallelWorkerContext *pwcxt)
 {
+	EState	   *estate = node->ss.ps.state;
 	ParallelIndexScanDesc piscan;
 	bool		instrument = node->ss.ps.instrument != NULL;
 	bool		parallel_aware = node->ss.ps.plan->parallel_aware;
@@ -1784,6 +1786,7 @@ ExecIndexScanInitializeWorker(IndexScanState *node,
 	node->iss_ScanDesc =
 		index_beginscan_parallel(node->ss.ss_currentRelation,
 								 node->iss_RelationDesc,
+								 estate->es_snapshot,
 								 &node->iss_Instrument,
 								 node->iss_NumScanKeys,
 								 node->iss_NumOrderByKeys,
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index 94047d29430..ebc3d35e11b 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -347,10 +347,7 @@ void
 ExecSeqScanEstimate(SeqScanState *node,
 					ParallelContext *pcxt)
 {
-	EState	   *estate = node->ss.ps.state;
-
-	node->pscan_len = table_parallelscan_estimate(node->ss.ss_currentRelation,
-												  estate->es_snapshot);
+	node->pscan_len = table_parallelscan_estimate(node->ss.ss_currentRelation);
 	shm_toc_estimate_chunk(&pcxt->estimator, node->pscan_len);
 	shm_toc_estimate_keys(&pcxt->estimator, 1);
 }
@@ -370,11 +367,11 @@ ExecSeqScanInitializeDSM(SeqScanState *node,
 
 	pscan = shm_toc_allocate(pcxt->toc, node->pscan_len);
 	table_parallelscan_initialize(node->ss.ss_currentRelation,
-								  pscan,
-								  estate->es_snapshot);
+								  pscan);
 	shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, pscan);
 	node->ss.ss_currentScanDesc =
-		table_beginscan_parallel(node->ss.ss_currentRelation, pscan);
+		table_beginscan_parallel(node->ss.ss_currentRelation, pscan,
+								 estate->es_snapshot);
 }
 
 /* ----------------------------------------------------------------
@@ -403,9 +400,11 @@ void
 ExecSeqScanInitializeWorker(SeqScanState *node,
 							ParallelWorkerContext *pwcxt)
 {
+	EState	   *estate = node->ss.ps.state;
 	ParallelTableScanDesc pscan;
 
 	pscan = shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, false);
 	node->ss.ss_currentScanDesc =
-		table_beginscan_parallel(node->ss.ss_currentRelation, pscan);
+		table_beginscan_parallel(node->ss.ss_currentRelation, pscan,
+								 estate->es_snapshot);
 }
diff --git a/src/backend/executor/nodeTidrangescan.c b/src/backend/executor/nodeTidrangescan.c
index 4ceb181d622..f80c3de630f 100644
--- a/src/backend/executor/nodeTidrangescan.c
+++ b/src/backend/executor/nodeTidrangescan.c
@@ -431,11 +431,8 @@ ExecInitTidRangeScan(TidRangeScan *node, EState *estate, int eflags)
 void
 ExecTidRangeScanEstimate(TidRangeScanState *node, ParallelContext *pcxt)
 {
-	EState	   *estate = node->ss.ps.state;
-
 	node->trss_pscanlen =
-		table_parallelscan_estimate(node->ss.ss_currentRelation,
-									estate->es_snapshot);
+		table_parallelscan_estimate(node->ss.ss_currentRelation);
 	shm_toc_estimate_chunk(&pcxt->estimator, node->trss_pscanlen);
 	shm_toc_estimate_keys(&pcxt->estimator, 1);
 }
@@ -454,12 +451,11 @@ ExecTidRangeScanInitializeDSM(TidRangeScanState *node, ParallelContext *pcxt)
 
 	pscan = shm_toc_allocate(pcxt->toc, node->trss_pscanlen);
 	table_parallelscan_initialize(node->ss.ss_currentRelation,
-								  pscan,
-								  estate->es_snapshot);
+								  pscan);
 	shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, pscan);
 	node->ss.ss_currentScanDesc =
 		table_beginscan_parallel_tidrange(node->ss.ss_currentRelation,
-										  pscan);
+										  pscan, estate->es_snapshot);
 }
 
 /* ----------------------------------------------------------------
@@ -488,10 +484,11 @@ void
 ExecTidRangeScanInitializeWorker(TidRangeScanState *node,
 								 ParallelWorkerContext *pwcxt)
 {
+	EState	   *estate = node->ss.ps.state;
 	ParallelTableScanDesc pscan;
 
 	pscan = shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, false);
 	node->ss.ss_currentScanDesc =
 		table_beginscan_parallel_tidrange(node->ss.ss_currentRelation,
-										  pscan);
+										  pscan, estate->es_snapshot);
 }
diff --git a/src/include/access/genam.h b/src/include/access/genam.h
index 9200a22bd9f..411137997e1 100644
--- a/src/include/access/genam.h
+++ b/src/include/access/genam.h
@@ -201,6 +201,7 @@ extern void index_parallelscan_initialize(Relation heapRelation,
 extern void index_parallelrescan(IndexScanDesc scan);
 extern IndexScanDesc index_beginscan_parallel(Relation heaprel,
 											  Relation indexrel,
+											  Snapshot snapshot,
 											  IndexScanInstrumentation *instrument,
 											  int nkeys, int norderbys,
 											  ParallelIndexScanDesc pscan);
diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h
index 0f8fdcff782..1ba1a432618 100644
--- a/src/include/access/relscan.h
+++ b/src/include/access/relscan.h
@@ -81,8 +81,6 @@ typedef struct ParallelTableScanDescData
 {
 	RelFileLocator phs_locator; /* physical relation to scan */
 	bool		phs_syncscan;	/* report location to syncscan logic? */
-	bool		phs_snapshot_any;	/* SnapshotAny, not phs_snapshot_data? */
-	Size		phs_snapshot_off;	/* data for snapshot */
 } ParallelTableScanDescData;
 typedef struct ParallelTableScanDescData *ParallelTableScanDesc;
 
@@ -200,7 +198,6 @@ typedef struct ParallelIndexScanDescData
 	RelFileLocator ps_indexlocator; /* physical index relation to scan */
 	Size		ps_offset_ins;	/* Offset to SharedIndexScanInstrumentation */
 	Size		ps_offset_am;	/* Offset to am-specific structure */
-	char		ps_snapshot_data[FLEXIBLE_ARRAY_MEMBER];
 }			ParallelIndexScanDescData;
 
 struct TupleTableSlot;
diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h
index 2fa790b6bf5..00c2e2c6840 100644
--- a/src/include/access/tableam.h
+++ b/src/include/access/tableam.h
@@ -1108,7 +1108,7 @@ table_scan_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
  * Estimate the size of shared memory needed for a parallel scan of this
  * relation.
  */
-extern Size table_parallelscan_estimate(Relation rel, Snapshot snapshot);
+extern Size table_parallelscan_estimate(Relation rel);
 
 /*
  * Initialize ParallelTableScanDesc for a parallel scan of this
@@ -1117,8 +1117,7 @@ extern Size table_parallelscan_estimate(Relation rel, Snapshot snapshot);
  * individual workers attach via table_beginscan_parallel.
  */
 extern void table_parallelscan_initialize(Relation rel,
-										  ParallelTableScanDesc pscan,
-										  Snapshot snapshot);
+										  ParallelTableScanDesc pscan);
 
 /*
  * Begin a parallel scan. `pscan` needs to have been initialized with
@@ -1128,7 +1127,8 @@ extern void table_parallelscan_initialize(Relation rel,
  * Caller must hold a suitable lock on the relation.
  */
 extern TableScanDesc table_beginscan_parallel(Relation relation,
-											  ParallelTableScanDesc pscan);
+											  ParallelTableScanDesc pscan,
+											  Snapshot snapshot);
 
 /*
  * Begin a parallel tid range scan. `pscan` needs to have been initialized
@@ -1138,7 +1138,8 @@ extern TableScanDesc table_beginscan_parallel(Relation relation,
  * Caller must hold a suitable lock on the relation.
  */
 extern TableScanDesc table_beginscan_parallel_tidrange(Relation relation,
-													   ParallelTableScanDesc pscan);
+													   ParallelTableScanDesc pscan,
+													   Snapshot snapshot);
 
 /*
  * Restart a parallel scan.  Call this in the leader process.  Caller is
-- 
2.47.3

