diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 8158508..6fca8e3 100644
*** a/src/backend/access/nbtree/nbtree.c
--- b/src/backend/access/nbtree/nbtree.c
*************** btvacuumcleanup(IndexVacuumInfo *info, I
*** 832,840 ****
  		btvacuumscan(info, stats, NULL, NULL, 0);
  	}
  
- 	/* Finally, vacuum the FSM */
- 	IndexFreeSpaceMapVacuum(info->index);
- 
  	/*
  	 * It's quite possible for us to be fooled by concurrent page splits into
  	 * double-counting some index tuples, so disbelieve any total that exceeds
--- 832,837 ----
*************** btvacuumscan(IndexVacuumInfo *info, Inde
*** 976,981 ****
--- 973,993 ----
  
  	MemoryContextDelete(vstate.pagedelcontext);
  
+ 	/*
+ 	 * If we found any recyclable pages (and recorded them in the FSM), then
+ 	 * forcibly update the upper-level FSM pages to ensure that searchers can
+ 	 * find them.  It's possible that the pages were also found during
+ 	 * previous scans and so this is a waste of time, but it's cheap enough
+ 	 * relative to scanning the index that it shouldn't matter much, and
+ 	 * making sure that free pages are available sooner not later seems
+ 	 * worthwhile.
+ 	 *
+ 	 * Note that if no recyclable pages exist, we don't bother vacuuming the
+ 	 * FSM at all.
+ 	 */
+ 	if (vstate.totFreePages > 0)
+ 		IndexFreeSpaceMapVacuum(rel);
+ 
  	/* update statistics */
  	stats->num_pages = num_pages;
  	stats->pages_free = vstate.totFreePages;
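For review purposes, here is a minimal sketch of the pattern the new comment describes, using the real indexfsm.c entry points (RecordFreeIndexPage, IndexFreeSpaceMapVacuum).  The xyz_* names and the XyzVacState struct are hypothetical stand-ins for the AM-specific logic in btvacuumscan/btvacuumpage and are not part of this patch:

#include "postgres.h"

#include "access/genam.h"
#include "storage/bufmgr.h"
#include "storage/indexfsm.h"
#include "utils/rel.h"

/* hypothetical per-scan state; stands in for vstate.totFreePages above */
typedef struct XyzVacState
{
    BlockNumber totFreePages;   /* recyclable pages found by this scan */
} XyzVacState;

/* hypothetical AM-specific check, standing in for btvacuumpage's logic */
static bool xyz_page_is_recyclable(Relation rel, BlockNumber blkno);

static void
xyz_vacuumscan(IndexVacuumInfo *info, XyzVacState *vstate)
{
    Relation    rel = info->index;
    BlockNumber num_pages = RelationGetNumberOfBlocks(rel);
    BlockNumber blkno;

    vstate->totFreePages = 0;

    for (blkno = 1; blkno < num_pages; blkno++) /* block 0 is the metapage */
    {
        if (xyz_page_is_recyclable(rel, blkno))
        {
            /* record the page in the FSM's leaf level as it is found */
            RecordFreeIndexPage(rel, blkno);
            vstate->totFreePages++;
        }
    }

    /*
     * Propagate the new leaf entries up through the FSM's upper pages, but
     * only if something was actually recorded; a scan that freed nothing
     * can skip the FSM vacuum entirely, which is the point of the patch.
     */
    if (vstate->totFreePages > 0)
        IndexFreeSpaceMapVacuum(rel);
}

spgvacuumscan below follows the same shape; there the already-maintained stats->pages_deleted counter serves as the "did we free anything" test.
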
diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c
index 72839cb..a83a4b5 100644
*** a/src/backend/access/spgist/spgvacuum.c
--- b/src/backend/access/spgist/spgvacuum.c
*************** spgvacuumscan(spgBulkDeleteState *bds)
*** 846,851 ****
--- 846,866 ----
  	SpGistUpdateMetaPage(index);
  
  	/*
+ 	 * If we found any empty pages (and recorded them in the FSM), then
+ 	 * forcibly update the upper-level FSM pages to ensure that searchers can
+ 	 * find them.  It's possible that the pages were also found during
+ 	 * previous scans and so this is a waste of time, but it's cheap enough
+ 	 * relative to scanning the index that it shouldn't matter much, and
+ 	 * making sure that free pages are available sooner not later seems
+ 	 * worthwhile.
+ 	 *
+ 	 * Note that if no empty pages exist, we don't bother vacuuming the FSM at
+ 	 * all.
+ 	 */
+ 	if (bds->stats->pages_deleted > 0)
+ 		IndexFreeSpaceMapVacuum(index);
+ 
+ 	/*
  	 * Truncate index if possible
  	 *
  	 * XXX disabled because it's unsafe due to possible concurrent inserts.
*************** dummy_callback(ItemPointer itemptr, void
*** 916,922 ****
  IndexBulkDeleteResult *
  spgvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
  {
- 	Relation	index = info->index;
  	spgBulkDeleteState bds;
  
  	/* No-op in ANALYZE ONLY mode */
--- 931,936 ----
*************** spgvacuumcleanup(IndexVacuumInfo *info, 
*** 926,933 ****
  	/*
  	 * We don't need to scan the index if there was a preceding bulkdelete
  	 * pass.  Otherwise, make a pass that won't delete any live tuples, but
! 	 * might still accomplish useful stuff with redirect/placeholder cleanup,
! 	 * and in any case will provide stats.
  	 */
  	if (stats == NULL)
  	{
--- 940,947 ----
  	/*
  	 * We don't need to scan the index if there was a preceding bulkdelete
  	 * pass.  Otherwise, make a pass that won't delete any live tuples, but
! 	 * might still accomplish useful stuff with redirect/placeholder cleanup
! 	 * and/or FSM housekeeping, and in any case will provide stats.
  	 */
  	if (stats == NULL)
  	{
*************** spgvacuumcleanup(IndexVacuumInfo *info, 
*** 940,948 ****
  		spgvacuumscan(&bds);
  	}
  
- 	/* Finally, vacuum the FSM */
- 	IndexFreeSpaceMapVacuum(index);
- 
  	/*
  	 * It's quite possible for us to be fooled by concurrent tuple moves into
  	 * double-counting some index tuples, so disbelieve any total that exceeds
--- 954,959 ----
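
One note on the "searchers can find them" wording in the added comments: the searchers are the page-allocation paths (_bt_getbuf, SpGistGetBuffer), which ask the FSM for a recyclable block before extending the relation.  GetFreeIndexPage's lookup descends from the FSM's upper pages, so newly recorded leaf entries stay invisible until IndexFreeSpaceMapVacuum propagates them upward; the patch keeps that forced propagation while skipping it when nothing was freed.  A simplified, hypothetical consumer for illustration only (xyz_getfreebuf is not a real function; locking and relation-extension details are elided):

#include "postgres.h"

#include "storage/bufmgr.h"
#include "storage/indexfsm.h"
#include "utils/rel.h"

static Buffer
xyz_getfreebuf(Relation rel)
{
    for (;;)
    {
        BlockNumber blkno = GetFreeIndexPage(rel);
        Buffer      buf;

        if (blkno == InvalidBlockNumber)
            break;              /* FSM has nothing recyclable to offer */

        buf = ReadBuffer(rel, blkno);
        if (ConditionalLockBuffer(buf))
        {
            /* the real callers re-verify the page is still safe to reuse */
            return buf;
        }
        ReleaseBuffer(buf);     /* contended; try the next FSM suggestion */
    }

    /*
     * Nothing usable in the FSM: extend the relation (the real code also
     * takes the relation extension lock around this step).
     */
    return ReadBuffer(rel, P_NEW);
}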
