@@ -137,8 +137,9 @@ static BufferAccessStrategy vac_strategy;
 
 
 /* non-export function prototypes */
-static void lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
-               Relation *Irel, int nindexes, bool aggressive);
+static void lazy_scan_heap(Relation onerel, int options,
+               LVRelStats *vacrelstats, Relation *Irel, int nindexes,
+               bool aggressive);
 static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
 static bool lazy_check_needs_freeze(Buffer buf, bool *hastup);
 static void lazy_vacuum_index(Relation indrel,
@@ -223,15 +224,17 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
                           &MultiXactCutoff, &mxactFullScanLimit);
 
     /*
-     * We request an aggressive scan if either the table's frozen Xid is now
-     * older than or equal to the requested Xid full-table scan limit; or if
-     * the table's minimum MultiXactId is older than or equal to the requested
-     * mxid full-table scan limit.
+     * We request an aggressive scan if the table's frozen Xid is now older
+     * than or equal to the requested Xid full-table scan limit; or if the
+     * table's minimum MultiXactId is older than or equal to the requested
+     * mxid full-table scan limit; or if DISABLE_PAGE_SKIPPING was specified.
     */
    aggressive = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid,
                                               xidFullScanLimit);
    aggressive |= MultiXactIdPrecedesOrEquals(onerel->rd_rel->relminmxid,
                                              mxactFullScanLimit);
+   if (options & VACOPT_DISABLE_PAGE_SKIPPING)
+       aggressive = true;
 
    vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
 
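The new branch above is the heart of the feature: DISABLE_PAGE_SKIPPING is folded into the same aggressive flag that the existing xid/mxid horizon checks set, rather than being tested separately later. The following is a minimal, standalone sketch of that decision. The stand-in types, the simplified comparison helpers, the flag value, and the main() driver are illustrative only; in the server the inputs come from relfrozenxid/relminmxid and vacuum_set_xid_limits(), and the real TransactionIdPrecedesOrEquals()/MultiXactIdPrecedesOrEquals() handle XID wraparound.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for server types and flags; names mirror the patch, the
 * bit value is illustrative only. */
typedef uint32_t TransactionId;
typedef uint32_t MultiXactId;
#define VACOPT_DISABLE_PAGE_SKIPPING (1 << 7)

/* Simplified stand-ins; the real comparisons are wraparound-aware. */
static bool xid_precedes_or_equals(TransactionId a, TransactionId b) { return a <= b; }
static bool mxid_precedes_or_equals(MultiXactId a, MultiXactId b) { return a <= b; }

/* Mirrors the patched logic in lazy_vacuum_rel(): aggressive if either
 * horizon has reached its full-table-scan limit, or if the caller
 * passed DISABLE_PAGE_SKIPPING. */
static bool
want_aggressive(TransactionId relfrozenxid, TransactionId xidFullScanLimit,
                MultiXactId relminmxid, MultiXactId mxactFullScanLimit,
                int options)
{
    bool aggressive = xid_precedes_or_equals(relfrozenxid, xidFullScanLimit);

    aggressive |= mxid_precedes_or_equals(relminmxid, mxactFullScanLimit);
    if (options & VACOPT_DISABLE_PAGE_SKIPPING)
        aggressive = true;
    return aggressive;
}

int
main(void)
{
    /* Young table, but DISABLE_PAGE_SKIPPING forces an aggressive scan. */
    printf("%d\n", want_aggressive(1000, 500, 10, 5,
                                   VACOPT_DISABLE_PAGE_SKIPPING));  /* prints 1 */
    printf("%d\n", want_aggressive(1000, 500, 10, 5, 0));           /* prints 0 */
    return 0;
}

Reusing the aggressive flag keeps the freeze-limit bookkeeping on a single code path; the option only adds one more way for that flag to become true.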
@@ -246,7 +249,7 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
    vacrelstats->hasindex = (nindexes > 0);
 
    /* Do the vacuuming */
-   lazy_scan_heap(onerel, vacrelstats, Irel, nindexes, aggressive);
+   lazy_scan_heap(onerel, options, vacrelstats, Irel, nindexes, aggressive);
 
    /* Done with indexes */
    vac_close_indexes(nindexes, Irel, NoLock);
@@ -441,7 +444,7 @@ vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
 *     reference them have been killed.
 */
static void
-lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
+lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
               Relation *Irel, int nindexes, bool aggressive)
{
    BlockNumber nblocks,
@@ -542,25 +545,28 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
     * the last page.  This is worth avoiding mainly because such a lock must
     * be replayed on any hot standby, where it can be disruptive.
     */
-   for (next_unskippable_block = 0;
-        next_unskippable_block < nblocks;
-        next_unskippable_block++)
+   next_unskippable_block = 0;
+   if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
    {
-       uint8       vmstatus;
-
-       vmstatus = visibilitymap_get_status(onerel, next_unskippable_block,
-                                           &vmbuffer);
-       if (aggressive)
+       while (next_unskippable_block < nblocks)
        {
-           if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
-               break;
-       }
-       else
-       {
-           if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
-               break;
+           uint8       vmstatus;
+
+           vmstatus = visibilitymap_get_status(onerel, next_unskippable_block,
+                                               &vmbuffer);
+           if (aggressive)
+           {
+               if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
+                   break;
+           }
+           else
+           {
+               if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
+                   break;
+           }
+           vacuum_delay_point();
+           next_unskippable_block++;
        }
-       vacuum_delay_point();
    }
 
    if (next_unskippable_block >= SKIP_PAGES_THRESHOLD)
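Both skipping loops in this patch get the same shape: the old for loop becomes a while loop nested under an option check, so that with DISABLE_PAGE_SKIPPING next_unskippable_block never advances and every page is read. Moving vacuum_delay_point() and the increment into the while body preserves the original for-loop ordering (delay after each inspected block, then advance). Here is a minimal sketch of that control flow with a toy visibility map standing in for visibilitymap_get_status(); the flag values and the toy_vm array are assumptions for illustration, not the server's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t BlockNumber;

/* Illustrative stand-ins; real values live in visibilitymap.h/vacuum.h. */
#define VISIBILITYMAP_ALL_VISIBLE 0x01
#define VISIBILITYMAP_ALL_FROZEN  0x02
#define VACOPT_DISABLE_PAGE_SKIPPING (1 << 7)

/* Toy visibility map: one status byte per heap block. */
static uint8_t toy_vm[8] = {0x03, 0x03, 0x01, 0x03, 0x00, 0x03, 0x03, 0x03};

/*
 * Mirrors the patched loop: find the first block that cannot be skipped.
 * Aggressive vacuums may skip only all-frozen pages; normal vacuums may
 * skip any all-visible page.  With DISABLE_PAGE_SKIPPING the loop body
 * is never entered, so block 0 is returned and nothing is skipped.
 */
static BlockNumber
first_unskippable(BlockNumber nblocks, bool aggressive, int options)
{
    BlockNumber next_unskippable_block = 0;

    if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
    {
        while (next_unskippable_block < nblocks)
        {
            uint8_t vmstatus = toy_vm[next_unskippable_block];

            if (aggressive)
            {
                if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
                    break;
            }
            else
            {
                if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
                    break;
            }
            next_unskippable_block++;   /* vacuum_delay_point() omitted */
        }
    }
    return next_unskippable_block;
}

int
main(void)
{
    printf("%u\n", (unsigned) first_unskippable(8, true, 0));   /* 2: block 2 not frozen */
    printf("%u\n", (unsigned) first_unskippable(8, false, 0));  /* 4: block 4 not visible */
    printf("%u\n", (unsigned) first_unskippable(8, false,
                                 VACOPT_DISABLE_PAGE_SKIPPING)); /* 0: skip nothing */
    return 0;
}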
@@ -594,26 +600,29 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
        if (blkno == next_unskippable_block)
        {
            /* Time to advance next_unskippable_block */
-           for (next_unskippable_block++;
-                next_unskippable_block < nblocks;
-                next_unskippable_block++)
+           next_unskippable_block++;
+           if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
            {
-               uint8       vmskipflags;
-
-               vmskipflags = visibilitymap_get_status(onerel,
-                                                      next_unskippable_block,
-                                                      &vmbuffer);
-               if (aggressive)
+               while (next_unskippable_block < nblocks)
                {
-                   if ((vmskipflags & VISIBILITYMAP_ALL_FROZEN) == 0)
-                       break;
-               }
-               else
-               {
-                   if ((vmskipflags & VISIBILITYMAP_ALL_VISIBLE) == 0)
-                       break;
+                   uint8       vmskipflags;
+
+                   vmskipflags = visibilitymap_get_status(onerel,
+                                                          next_unskippable_block,
+                                                          &vmbuffer);
+                   if (aggressive)
+                   {
+                       if ((vmskipflags & VISIBILITYMAP_ALL_FROZEN) == 0)
+                           break;
+                   }
+                   else
+                   {
+                       if ((vmskipflags & VISIBILITYMAP_ALL_VISIBLE) == 0)
+                           break;
+                   }
+                   vacuum_delay_point();
+                   next_unskippable_block++;
                }
-               vacuum_delay_point();
            }
 
            /*
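This second hunk applies the identical transformation to the re-advance of next_unskippable_block inside the main scan loop, so skipping stays disabled consistently whether the scan is at its starting block or mid-relation; the sketch after the previous hunk models both loops. At the SQL level the new bit is set by VACUUM (DISABLE_PAGE_SKIPPING) tablename, and it is meant for emergencies where the visibility map itself is suspected to be corrupt, not for routine use.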
@@ -1054,7 +1063,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
        }
        else
        {
-           bool    tuple_totally_frozen;
+           bool        tuple_totally_frozen;
 
            num_tuples += 1;
            hastup = true;
@@ -1064,8 +1073,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
             * freezing.  Note we already have exclusive buffer lock.
             */
            if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit,
-                                    MultiXactCutoff, &frozen[nfrozen],
-                                    &tuple_totally_frozen))
+                                         MultiXactCutoff, &frozen[nfrozen],
+                                         &tuple_totally_frozen))
                frozen[nfrozen++].offset = offnum;
 
            if (!tuple_totally_frozen)