diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index b802ed247e..205d1da679 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -1555,6 +1555,8 @@ lazy_scan_prune(LVRelState *vacrel,
 	int			nnewlpdead;
 	int			nfrozen;
 	TransactionId NewRelfrozenXid;
+	TransactionId FreezeLimit;
+	bool		max_freeze_page;
 	MultiXactId NewRelminMxid;
 	OffsetNumber deadoffsets[MaxHeapTuplesPerPage];
 	xl_heap_freeze_tuple frozen[MaxHeapTuplesPerPage];
@@ -1571,6 +1573,8 @@ lazy_scan_prune(LVRelState *vacrel,
 retry:
 
 	/* Initialize (or reset) page-level state */
+	FreezeLimit = vacrel->FreezeLimit;
+	max_freeze_page = false;
 	NewRelfrozenXid = vacrel->NewRelfrozenXid;
 	NewRelminMxid = vacrel->NewRelminMxid;
 	tuples_deleted = 0;
@@ -1777,13 +1781,31 @@ retry:
 		if (heap_prepare_freeze_tuple(tuple.t_data,
 									  vacrel->relfrozenxid,
 									  vacrel->relminmxid,
-									  vacrel->FreezeLimit,
+									  FreezeLimit,
 									  vacrel->MultiXactCutoff,
 									  &frozen[nfrozen],
 									  &tuple_totally_frozen,
 									  &NewRelfrozenXid,
 									  &NewRelminMxid))
 		{
 			/* Will execute freeze below */
 			frozen[nfrozen++].offset = offnum;
+
+			/*
+			 * If we find one tuple to freeze, we may as well freeze
+			 * aggressively for the rest of this page, since we will be
+			 * dirtying the page anyway and the amount we freeze is just
+			 * a heuristic and unrelated to correctness.
+			 *
+			 * To do this, set FreezeLimit to the latest possible value
+			 * for the remaining items on the page, so we freeze more often.
+			 * We might choose to retry the whole page with this more
+			 * aggressive value, but that seems considerably more expensive
+			 * than this simple nudge.
+			 */
+			if (!max_freeze_page)
+			{
+				FreezeLimit = vacrel->OldestXmin;
+				max_freeze_page = true;
+			}
 		}