diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index ff03c68..e29c5ad 100644
*** a/src/backend/access/heap/heapam.c
--- b/src/backend/access/heap/heapam.c
*************** heap_rescan(HeapScanDesc scan,
*** 1531,1551 ****
  	 * reinitialize scan descriptor
  	 */
  	initscan(scan, key, true);
- 
- 	/*
- 	 * reset parallel scan, if present
- 	 */
- 	if (scan->rs_parallel != NULL)
- 	{
- 		ParallelHeapScanDesc parallel_scan;
- 
- 		/*
- 		 * Caller is responsible for making sure that all workers have
- 		 * finished the scan before calling this.
- 		 */
- 		parallel_scan = scan->rs_parallel;
- 		pg_atomic_write_u64(&parallel_scan->phs_nallocated, 0);
- 	}
  }
  
  /* ----------------
--- 1531,1536 ----
*************** heap_parallelscan_initialize(ParallelHea
*** 1643,1648 ****
--- 1628,1646 ----
  }
  
  /* ----------------
+  *		heap_parallelscan_reinitialize - reset a parallel scan
+  *
+  *		Call this in the leader process.  Caller is responsible for
+  *		making sure that all workers have finished the scan beforehand.
+  * ----------------
+  */
+ void
+ heap_parallelscan_reinitialize(ParallelHeapScanDesc parallel_scan)
+ {
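+ 	/* Reset the shared block allocator so the next scan starts from block 0 */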
+ 	pg_atomic_write_u64(&parallel_scan->phs_nallocated, 0);
+ }
+ 
+ /* ----------------
   *		heap_beginscan_parallel - join a parallel scan
   *
   *		Caller must hold a suitable lock on the correct relation.
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index ce47f1d..d8cdb0e 100644
*** a/src/backend/executor/execParallel.c
--- b/src/backend/executor/execParallel.c
*************** static bool ExecParallelInitializeDSM(Pl
*** 109,114 ****
--- 109,116 ----
  						  ExecParallelInitializeDSMContext *d);
  static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt,
  							 bool reinitialize);
+ static bool ExecParallelReInitializeDSM(PlanState *planstate,
+ 							ParallelContext *pcxt);
  static bool ExecParallelRetrieveInstrumentation(PlanState *planstate,
  									SharedExecutorInstrumentation *instrumentation);
  
*************** ExecParallelSetupTupleQueues(ParallelCon
*** 365,382 ****
  }
  
  /*
-  * Re-initialize the parallel executor info such that it can be reused by
-  * workers.
-  */
- void
- ExecParallelReinitialize(ParallelExecutorInfo *pei)
- {
- 	ReinitializeParallelDSM(pei->pcxt);
- 	pei->tqueue = ExecParallelSetupTupleQueues(pei->pcxt, true);
- 	pei->finished = false;
- }
- 
- /*
   * Sets up the required infrastructure for backend workers to perform
   * execution and return results to the main backend.
   */
--- 367,372 ----
*************** ExecInitParallelPlan(PlanState *planstat
*** 567,573 ****
  	ExecParallelInitializeDSM(planstate, &d);
  
  	/*
! 	 * Make sure that the world hasn't shifted under our feat.  This could
  	 * probably just be an Assert(), but let's be conservative for now.
  	 */
  	if (e.nnodes != d.nnodes)
--- 557,563 ----
  	ExecParallelInitializeDSM(planstate, &d);
  
  	/*
! 	 * Make sure that the world hasn't shifted under our feet.  This could
  	 * probably just be an Assert(), but let's be conservative for now.
  	 */
  	if (e.nnodes != d.nnodes)
*************** ExecInitParallelPlan(PlanState *planstat
*** 578,583 ****
--- 568,639 ----
  }
  
  /*
+  * Re-initialize the parallel executor shared memory state before launching
+  * a fresh batch of workers.
+  */
+ void
+ ExecParallelReinitialize(PlanState *planstate,
+ 						 ParallelExecutorInfo *pei)
+ {
+ 	ReinitializeParallelDSM(pei->pcxt);
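+ 	/* Recreate the tuple queues in the existing DSM space for the new batch of workers */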
+ 	pei->tqueue = ExecParallelSetupTupleQueues(pei->pcxt, true);
+ 	pei->finished = false;
+ 
+ 	/* Traverse plan tree and let each child node reset associated state. */
+ 	ExecParallelReInitializeDSM(planstate, pei->pcxt);
+ }
+ 
+ /*
+  * Traverse plan tree to reinitialize per-node dynamic shared memory state
+  */
+ static bool
+ ExecParallelReInitializeDSM(PlanState *planstate,
+ 							ParallelContext *pcxt)
+ {
+ 	if (planstate == NULL)
+ 		return false;
+ 
+ 	/*
+ 	 * Call reinitializers for parallel-aware plan nodes.
+ 	 */
+ 	if (planstate->plan->parallel_aware)
+ 	{
+ 		switch (nodeTag(planstate))
+ 		{
+ 			case T_SeqScanState:
+ 				ExecSeqScanReInitializeDSM((SeqScanState *) planstate,
+ 										   pcxt);
+ 				break;
+ 			case T_IndexScanState:
+ 				ExecIndexScanReInitializeDSM((IndexScanState *) planstate,
+ 											 pcxt);
+ 				break;
+ 			case T_IndexOnlyScanState:
+ 				ExecIndexOnlyScanReInitializeDSM((IndexOnlyScanState *) planstate,
+ 												 pcxt);
+ 				break;
+ 			case T_ForeignScanState:
+ 				ExecForeignScanReInitializeDSM((ForeignScanState *) planstate,
+ 											   pcxt);
+ 				break;
+ 			case T_CustomScanState:
+ 				ExecCustomScanReInitializeDSM((CustomScanState *) planstate,
+ 											  pcxt);
+ 				break;
+ 			case T_BitmapHeapScanState:
+ 				ExecBitmapHeapReInitializeDSM((BitmapHeapScanState *) planstate,
+ 											  pcxt);
+ 				break;
+ 
+ 			default:
+ 				break;
+ 		}
+ 	}
+ 
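+ 	/* Recurse to this node's children */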
+ 	return planstate_tree_walker(planstate, ExecParallelReInitializeDSM, pcxt);
+ }
+ 
+ /*
   * Copy instrumentation information about this node and its descendants from
   * dynamic shared memory.
   */
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index 79f534e..f7e55e0 100644
*** a/src/backend/executor/nodeBitmapHeapscan.c
--- b/src/backend/executor/nodeBitmapHeapscan.c
*************** ExecReScanBitmapHeapScan(BitmapHeapScanS
*** 705,727 ****
  	node->shared_tbmiterator = NULL;
  	node->shared_prefetch_iterator = NULL;
  
- 	/* Reset parallel bitmap state, if present */
- 	if (node->pstate)
- 	{
- 		dsa_area   *dsa = node->ss.ps.state->es_query_dsa;
- 
- 		node->pstate->state = BM_INITIAL;
- 
- 		if (DsaPointerIsValid(node->pstate->tbmiterator))
- 			tbm_free_shared_area(dsa, node->pstate->tbmiterator);
- 
- 		if (DsaPointerIsValid(node->pstate->prefetch_iterator))
- 			tbm_free_shared_area(dsa, node->pstate->prefetch_iterator);
- 
- 		node->pstate->tbmiterator = InvalidDsaPointer;
- 		node->pstate->prefetch_iterator = InvalidDsaPointer;
- 	}
- 
  	ExecScanReScan(&node->ss);
  
  	/*
--- 705,710 ----
*************** ExecBitmapHeapInitializeDSM(BitmapHeapSc
*** 1000,1005 ****
--- 983,1013 ----
  }
  
  /* ----------------------------------------------------------------
+  *		ExecBitmapHeapReInitializeDSM
+  *
+  *		Reset shared state before beginning a fresh scan.
+  * ----------------------------------------------------------------
+  */
+ void
+ ExecBitmapHeapReInitializeDSM(BitmapHeapScanState *node,
+ 							  ParallelContext *pcxt)
+ {
+ 	ParallelBitmapHeapState *pstate = node->pstate;
+ 	dsa_area   *dsa = node->ss.ps.state->es_query_dsa;
+ 
+ 	pstate->state = BM_INITIAL;
+ 
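+ 	/* Free shared iterator state left over from the previous scan, if any */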
+ 	if (DsaPointerIsValid(pstate->tbmiterator))
+ 		tbm_free_shared_area(dsa, pstate->tbmiterator);
+ 
+ 	if (DsaPointerIsValid(pstate->prefetch_iterator))
+ 		tbm_free_shared_area(dsa, pstate->prefetch_iterator);
+ 
+ 	pstate->tbmiterator = InvalidDsaPointer;
+ 	pstate->prefetch_iterator = InvalidDsaPointer;
+ }
+ 
+ /* ----------------------------------------------------------------
   *		ExecBitmapHeapInitializeWorker
   *
   *		Copy relevant information from TOC into planstate.
diff --git a/src/backend/executor/nodeCustom.c b/src/backend/executor/nodeCustom.c
index fb7645b..7ec72d7 100644
*** a/src/backend/executor/nodeCustom.c
--- b/src/backend/executor/nodeCustom.c
*************** ExecCustomScanInitializeDSM(CustomScanSt
*** 195,200 ****
--- 195,206 ----
  }
  
  void
+ ExecCustomScanReInitializeDSM(CustomScanState *node, ParallelContext *pcxt)
+ {
+ 	/*
+ 	 * XXX: nothing to do here yet; CustomExecMethods provides no callback
+ 	 * for resetting a custom provider's shared scan state.
+ 	 */
+ }
+ 
+ void
  ExecCustomScanInitializeWorker(CustomScanState *node, shm_toc *toc)
  {
  	const CustomExecMethods *methods = node->methods;
diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c
index 140e82e..779d0db 100644
*** a/src/backend/executor/nodeForeignscan.c
--- b/src/backend/executor/nodeForeignscan.c
*************** ExecForeignScanInitializeDSM(ForeignScan
*** 332,338 ****
  }
  
  /* ----------------------------------------------------------------
!  *		ExecForeignScanInitializeDSM
   *
   *		Initialization according to the parallel coordination information
   * ----------------------------------------------------------------
--- 332,350 ----
  }
  
  /* ----------------------------------------------------------------
!  *		ExecForeignScanReInitializeDSM
!  *
!  *		Reset shared state before beginning a fresh scan.
!  * ----------------------------------------------------------------
!  */
! void
! ExecForeignScanReInitializeDSM(ForeignScanState *node, ParallelContext *pcxt)
! {
! 	/*
! 	 * XXX: nothing to do here yet; FdwRoutine provides no callback for
! 	 * resetting an FDW's shared scan state.
! 	 */
! }
! 
! /* ----------------------------------------------------------------
!  *		ExecForeignScanInitializeWorker
   *
   *		Initialization according to the parallel coordination information
   * ----------------------------------------------------------------
diff --git a/src/backend/executor/nodeGather.c b/src/backend/executor/nodeGather.c
index e8d94ee..b8e24d9 100644
*** a/src/backend/executor/nodeGather.c
--- b/src/backend/executor/nodeGather.c
*************** ExecGather(PlanState *pstate)
*** 152,162 ****
  		{
  			ParallelContext *pcxt;
  
! 			/* Initialize the workers required to execute Gather node. */
  			if (!node->pei)
  				node->pei = ExecInitParallelPlan(node->ps.lefttree,
  												 estate,
  												 gather->num_workers);
  
  			/*
  			 * Register backend workers. We might not get as many as we
--- 152,165 ----
  		{
  			ParallelContext *pcxt;
  
! 			/* Initialize, or re-initialize, shared state needed by workers. */
  			if (!node->pei)
  				node->pei = ExecInitParallelPlan(node->ps.lefttree,
  												 estate,
  												 gather->num_workers);
+ 			else
+ 				ExecParallelReinitialize(node->ps.lefttree,
+ 										 node->pei);
  
  			/*
  			 * Register backend workers. We might not get as many as we
*************** ExecShutdownGather(GatherState *node)
*** 430,447 ****
  void
  ExecReScanGather(GatherState *node)
  {
! 	/*
! 	 * Re-initialize the parallel workers to perform rescan of relation. We
! 	 * want to gracefully shutdown all the workers so that they should be able
! 	 * to propagate any error or other information to master backend before
! 	 * dying.  Parallel context will be reused for rescan.
! 	 */
  	ExecShutdownGatherWorkers(node);
  
  	node->initialized = false;
  
- 	if (node->pei)
- 		ExecParallelReinitialize(node->pei);
- 
  	ExecReScan(node->ps.lefttree);
  }
--- 433,443 ----
  void
  ExecReScanGather(GatherState *node)
  {
! 	/* Make sure any existing workers are gracefully shut down */
  	ExecShutdownGatherWorkers(node);
  
+ 	/* Mark node so that shared state will be rebuilt at next call */
  	node->initialized = false;
  
  	ExecReScan(node->ps.lefttree);
  }
diff --git a/src/backend/executor/nodeGatherMerge.c b/src/backend/executor/nodeGatherMerge.c
index 64c6239..26fde0b 100644
*** a/src/backend/executor/nodeGatherMerge.c
--- b/src/backend/executor/nodeGatherMerge.c
*************** ExecGatherMerge(PlanState *pstate)
*** 186,196 ****
  		{
  			ParallelContext *pcxt;
  
! 			/* Initialize data structures for workers. */
  			if (!node->pei)
  				node->pei = ExecInitParallelPlan(node->ps.lefttree,
  												 estate,
  												 gm->num_workers);
  
  			/* Try to launch workers. */
  			pcxt = node->pei->pcxt;
--- 186,199 ----
  		{
  			ParallelContext *pcxt;
  
! 			/* Initialize, or re-initialize, shared state needed by workers. */
  			if (!node->pei)
  				node->pei = ExecInitParallelPlan(node->ps.lefttree,
  												 estate,
  												 gm->num_workers);
+ 			else
+ 				ExecParallelReinitialize(node->ps.lefttree,
+ 										 node->pei);
  
  			/* Try to launch workers. */
  			pcxt = node->pei->pcxt;
*************** ExecShutdownGatherMergeWorkers(GatherMer
*** 325,344 ****
  void
  ExecReScanGatherMerge(GatherMergeState *node)
  {
! 	/*
! 	 * Re-initialize the parallel workers to perform rescan of relation. We
! 	 * want to gracefully shutdown all the workers so that they should be able
! 	 * to propagate any error or other information to master backend before
! 	 * dying.  Parallel context will be reused for rescan.
! 	 */
  	ExecShutdownGatherMergeWorkers(node);
  
  	node->initialized = false;
  	node->gm_initialized = false;
  
- 	if (node->pei)
- 		ExecParallelReinitialize(node->pei);
- 
  	ExecReScan(node->ps.lefttree);
  }
  
--- 328,340 ----
  void
  ExecReScanGatherMerge(GatherMergeState *node)
  {
! 	/* Make sure any existing workers are gracefully shut down */
  	ExecShutdownGatherMergeWorkers(node);
  
+ 	/* Mark node so that shared state will be rebuilt at next call */
  	node->initialized = false;
  	node->gm_initialized = false;
  
  	ExecReScan(node->ps.lefttree);
  }
  
diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c
index fe7ba3f..5351cb8 100644
*** a/src/backend/executor/nodeIndexonlyscan.c
--- b/src/backend/executor/nodeIndexonlyscan.c
***************
*** 25,30 ****
--- 25,31 ----
   *						parallel index-only scan
   *		ExecIndexOnlyScanInitializeDSM	initialize DSM for parallel
   *						index-only scan
+  *		ExecIndexOnlyScanReInitializeDSM	reinitialize DSM for fresh scan
   *		ExecIndexOnlyScanInitializeWorker attach to DSM info in parallel worker
   */
  #include "postgres.h"
*************** ExecIndexOnlyScan(PlanState *pstate)
*** 336,351 ****
  void
  ExecReScanIndexOnlyScan(IndexOnlyScanState *node)
  {
- 	bool		reset_parallel_scan = true;
- 
- 	/*
- 	 * If we are here to just update the scan keys, then don't reset parallel
- 	 * scan. For detailed reason behind this look in the comments for
- 	 * ExecReScanIndexScan.
- 	 */
- 	if (node->ioss_NumRuntimeKeys != 0 && !node->ioss_RuntimeKeysReady)
- 		reset_parallel_scan = false;
- 
  	/*
  	 * If we are doing runtime key calculations (ie, any of the index key
  	 * values weren't simple Consts), compute the new key values.  But first,
--- 337,342 ----
*************** ExecReScanIndexOnlyScan(IndexOnlyScanSta
*** 366,380 ****
  
  	/* reset index scan */
  	if (node->ioss_ScanDesc)
- 	{
- 
  		index_rescan(node->ioss_ScanDesc,
  					 node->ioss_ScanKeys, node->ioss_NumScanKeys,
  					 node->ioss_OrderByKeys, node->ioss_NumOrderByKeys);
  
- 		if (reset_parallel_scan && node->ioss_ScanDesc->parallel_scan)
- 			index_parallelrescan(node->ioss_ScanDesc);
- 	}
  	ExecScanReScan(&node->ss);
  }
  
--- 357,366 ----
*************** ExecIndexOnlyScanInitializeDSM(IndexOnly
*** 672,677 ****
--- 658,676 ----
  }
  
  /* ----------------------------------------------------------------
+  *		ExecIndexOnlyScanReInitializeDSM
+  *
+  *		Reset shared state before beginning a fresh scan.
+  * ----------------------------------------------------------------
+  */
+ void
+ ExecIndexOnlyScanReInitializeDSM(IndexOnlyScanState *node,
+ 								 ParallelContext *pcxt)
+ {
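+ 	/* Let the index AM reset the shared scan state for a fresh scan */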
+ 	index_parallelrescan(node->ioss_ScanDesc);
+ }
+ 
+ /* ----------------------------------------------------------------
   *		ExecIndexOnlyScanInitializeWorker
   *
   *		Copy relevant information from TOC into planstate.
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 404076d..638b17b 100644
*** a/src/backend/executor/nodeIndexscan.c
--- b/src/backend/executor/nodeIndexscan.c
***************
*** 24,29 ****
--- 24,30 ----
   *		ExecIndexRestrPos		restores scan position.
   *		ExecIndexScanEstimate	estimates DSM space needed for parallel index scan
   *		ExecIndexScanInitializeDSM initialize DSM for parallel indexscan
+  *		ExecIndexScanReInitializeDSM reinitialize DSM for fresh scan
   *		ExecIndexScanInitializeWorker attach to DSM info in parallel worker
   */
  #include "postgres.h"
*************** ExecIndexScan(PlanState *pstate)
*** 577,594 ****
  void
  ExecReScanIndexScan(IndexScanState *node)
  {
- 	bool		reset_parallel_scan = true;
- 
- 	/*
- 	 * If we are here to just update the scan keys, then don't reset parallel
- 	 * scan.  We don't want each of the participating process in the parallel
- 	 * scan to update the shared parallel scan state at the start of the scan.
- 	 * It is quite possible that one of the participants has already begun
- 	 * scanning the index when another has yet to start it.
- 	 */
- 	if (node->iss_NumRuntimeKeys != 0 && !node->iss_RuntimeKeysReady)
- 		reset_parallel_scan = false;
- 
  	/*
  	 * If we are doing runtime key calculations (ie, any of the index key
  	 * values weren't simple Consts), compute the new key values.  But first,
--- 578,583 ----
*************** ExecReScanIndexScan(IndexScanState *node
*** 614,634 ****
  			reorderqueue_pop(node);
  	}
  
! 	/*
! 	 * Reset (parallel) index scan.  For parallel-aware nodes, the scan
! 	 * descriptor is initialized during actual execution of node and we can
! 	 * reach here before that (ex. during execution of nest loop join).  So,
! 	 * avoid updating the scan descriptor at that time.
! 	 */
  	if (node->iss_ScanDesc)
- 	{
  		index_rescan(node->iss_ScanDesc,
  					 node->iss_ScanKeys, node->iss_NumScanKeys,
  					 node->iss_OrderByKeys, node->iss_NumOrderByKeys);
- 
- 		if (reset_parallel_scan && node->iss_ScanDesc->parallel_scan)
- 			index_parallelrescan(node->iss_ScanDesc);
- 	}
  	node->iss_ReachedEnd = false;
  
  	ExecScanReScan(&node->ss);
--- 603,613 ----
  			reorderqueue_pop(node);
  	}
  
! 	/* reset index scan */
  	if (node->iss_ScanDesc)
  		index_rescan(node->iss_ScanDesc,
  					 node->iss_ScanKeys, node->iss_NumScanKeys,
  					 node->iss_OrderByKeys, node->iss_NumOrderByKeys);
  	node->iss_ReachedEnd = false;
  
  	ExecScanReScan(&node->ss);
*************** ExecIndexScanInitializeDSM(IndexScanStat
*** 1717,1722 ****
--- 1696,1714 ----
  }
  
  /* ----------------------------------------------------------------
+  *		ExecIndexScanReInitializeDSM
+  *
+  *		Reset shared state before beginning a fresh scan.
+  * ----------------------------------------------------------------
+  */
+ void
+ ExecIndexScanReInitializeDSM(IndexScanState *node,
+ 							 ParallelContext *pcxt)
+ {
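+ 	/* As for index-only scans, have the index AM reset its shared state */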
+ 	index_parallelrescan(node->iss_ScanDesc);
+ }
+ 
+ /* ----------------------------------------------------------------
   *		ExecIndexScanInitializeWorker
   *
   *		Copy relevant information from TOC into planstate.
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index 5c49d4c..d4ac939 100644
*** a/src/backend/executor/nodeSeqscan.c
--- b/src/backend/executor/nodeSeqscan.c
***************
*** 22,27 ****
--- 22,28 ----
   *
   *		ExecSeqScanEstimate		estimates DSM space needed for parallel scan
   *		ExecSeqScanInitializeDSM initialize DSM for parallel scan
+  *		ExecSeqScanReInitializeDSM reinitialize DSM for fresh parallel scan
   *		ExecSeqScanInitializeWorker attach to DSM info in parallel worker
   */
  #include "postgres.h"
*************** ExecSeqScanInitializeDSM(SeqScanState *n
*** 325,330 ****
--- 326,346 ----
  }
  
  /* ----------------------------------------------------------------
+  *		ExecSeqScanReInitializeDSM
+  *
+  *		Reset shared state before beginning a fresh scan.
+  * ----------------------------------------------------------------
+  */
+ void
+ ExecSeqScanReInitializeDSM(SeqScanState *node,
+ 						   ParallelContext *pcxt)
+ {
+ 	HeapScanDesc scan = node->ss.ss_currentScanDesc;
+ 
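+ 	/* rs_parallel points into dynamic shared memory; reset its shared state */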
+ 	heap_parallelscan_reinitialize(scan->rs_parallel);
+ }
+ 
+ /* ----------------------------------------------------------------
   *		ExecSeqScanInitializeWorker
   *
   *		Copy relevant information from TOC into planstate.
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index b2132e7..4e41024 100644
*** a/src/include/access/heapam.h
--- b/src/include/access/heapam.h
*************** extern HeapTuple heap_getnext(HeapScanDe
*** 130,135 ****
--- 130,136 ----
  extern Size heap_parallelscan_estimate(Snapshot snapshot);
  extern void heap_parallelscan_initialize(ParallelHeapScanDesc target,
  							 Relation relation, Snapshot snapshot);
+ extern void heap_parallelscan_reinitialize(ParallelHeapScanDesc parallel_scan);
  extern HeapScanDesc heap_beginscan_parallel(Relation, ParallelHeapScanDesc);
  
  extern bool heap_fetch(Relation relation, Snapshot snapshot,
diff --git a/src/include/executor/execParallel.h b/src/include/executor/execParallel.h
index bd0a87f..a651224 100644
*** a/src/include/executor/execParallel.h
--- b/src/include/executor/execParallel.h
*************** extern ParallelExecutorInfo *ExecInitPar
*** 36,42 ****
  					 EState *estate, int nworkers);
  extern void ExecParallelFinish(ParallelExecutorInfo *pei);
  extern void ExecParallelCleanup(ParallelExecutorInfo *pei);
! extern void ExecParallelReinitialize(ParallelExecutorInfo *pei);
  
  extern void ParallelQueryMain(dsm_segment *seg, shm_toc *toc);
  
--- 36,43 ----
  					 EState *estate, int nworkers);
  extern void ExecParallelFinish(ParallelExecutorInfo *pei);
  extern void ExecParallelCleanup(ParallelExecutorInfo *pei);
! extern void ExecParallelReinitialize(PlanState *planstate,
! 						 ParallelExecutorInfo *pei);
  
  extern void ParallelQueryMain(dsm_segment *seg, shm_toc *toc);
  
diff --git a/src/include/executor/nodeBitmapHeapscan.h b/src/include/executor/nodeBitmapHeapscan.h
index c77694c..10844a4 100644
*** a/src/include/executor/nodeBitmapHeapscan.h
--- b/src/include/executor/nodeBitmapHeapscan.h
*************** extern void ExecBitmapHeapEstimate(Bitma
*** 24,29 ****
--- 24,31 ----
  					   ParallelContext *pcxt);
  extern void ExecBitmapHeapInitializeDSM(BitmapHeapScanState *node,
  							ParallelContext *pcxt);
+ extern void ExecBitmapHeapReInitializeDSM(BitmapHeapScanState *node,
+ 							  ParallelContext *pcxt);
  extern void ExecBitmapHeapInitializeWorker(BitmapHeapScanState *node,
  							   shm_toc *toc);
  
diff --git a/src/include/executor/nodeCustom.h b/src/include/executor/nodeCustom.h
index a1cc63a..25767b6 100644
*** a/src/include/executor/nodeCustom.h
--- b/src/include/executor/nodeCustom.h
*************** extern void ExecCustomScanEstimate(Custo
*** 34,39 ****
--- 34,41 ----
  					   ParallelContext *pcxt);
  extern void ExecCustomScanInitializeDSM(CustomScanState *node,
  							ParallelContext *pcxt);
+ extern void ExecCustomScanReInitializeDSM(CustomScanState *node,
+ 							  ParallelContext *pcxt);
  extern void ExecCustomScanInitializeWorker(CustomScanState *node,
  							   shm_toc *toc);
  extern void ExecShutdownCustomScan(CustomScanState *node);
diff --git a/src/include/executor/nodeForeignscan.h b/src/include/executor/nodeForeignscan.h
index 0b66259..0354c2c 100644
*** a/src/include/executor/nodeForeignscan.h
--- b/src/include/executor/nodeForeignscan.h
*************** extern void ExecForeignScanEstimate(Fore
*** 25,30 ****
--- 25,32 ----
  						ParallelContext *pcxt);
  extern void ExecForeignScanInitializeDSM(ForeignScanState *node,
  							 ParallelContext *pcxt);
+ extern void ExecForeignScanReInitializeDSM(ForeignScanState *node,
+ 							   ParallelContext *pcxt);
  extern void ExecForeignScanInitializeWorker(ForeignScanState *node,
  								shm_toc *toc);
  extern void ExecShutdownForeignScan(ForeignScanState *node);
diff --git a/src/include/executor/nodeIndexonlyscan.h b/src/include/executor/nodeIndexonlyscan.h
index c8a709c..690b5db 100644
*** a/src/include/executor/nodeIndexonlyscan.h
--- b/src/include/executor/nodeIndexonlyscan.h
*************** extern void ExecIndexOnlyScanEstimate(In
*** 28,33 ****
--- 28,35 ----
  						  ParallelContext *pcxt);
  extern void ExecIndexOnlyScanInitializeDSM(IndexOnlyScanState *node,
  							   ParallelContext *pcxt);
+ extern void ExecIndexOnlyScanReInitializeDSM(IndexOnlyScanState *node,
+ 								 ParallelContext *pcxt);
  extern void ExecIndexOnlyScanInitializeWorker(IndexOnlyScanState *node,
  								  shm_toc *toc);
  
diff --git a/src/include/executor/nodeIndexscan.h b/src/include/executor/nodeIndexscan.h
index 1668e34..0670e87 100644
*** a/src/include/executor/nodeIndexscan.h
--- b/src/include/executor/nodeIndexscan.h
*************** extern void ExecIndexRestrPos(IndexScanS
*** 24,29 ****
--- 24,30 ----
  extern void ExecReScanIndexScan(IndexScanState *node);
  extern void ExecIndexScanEstimate(IndexScanState *node, ParallelContext *pcxt);
  extern void ExecIndexScanInitializeDSM(IndexScanState *node, ParallelContext *pcxt);
+ extern void ExecIndexScanReInitializeDSM(IndexScanState *node, ParallelContext *pcxt);
  extern void ExecIndexScanInitializeWorker(IndexScanState *node, shm_toc *toc);
  
  /*
diff --git a/src/include/executor/nodeSeqscan.h b/src/include/executor/nodeSeqscan.h
index 0fba79f..eb96799 100644
*** a/src/include/executor/nodeSeqscan.h
--- b/src/include/executor/nodeSeqscan.h
*************** extern void ExecReScanSeqScan(SeqScanSta
*** 24,29 ****
--- 24,30 ----
  /* parallel scan support */
  extern void ExecSeqScanEstimate(SeqScanState *node, ParallelContext *pcxt);
  extern void ExecSeqScanInitializeDSM(SeqScanState *node, ParallelContext *pcxt);
+ extern void ExecSeqScanReInitializeDSM(SeqScanState *node, ParallelContext *pcxt);
  extern void ExecSeqScanInitializeWorker(SeqScanState *node, shm_toc *toc);
  
  #endif							/* NODESEQSCAN_H */
