@@ -35,6 +35,8 @@ typedef struct
 
 	/* tuple visibility test, initialized for the relation */
 	GlobalVisState *vistest;
+	/* whether or not dead items can be set LP_UNUSED during pruning */
+	bool		mark_unused_now;
 
 	TransactionId new_prune_xid;	/* new prune hint value for page */
 	TransactionId snapshotConflictHorizon; /* latest xid removed */
@@ -67,6 +69,7 @@ static void heap_prune_record_prunable(PruneState *prstate, TransactionId xid);
 static void heap_prune_record_redirect(PruneState *prstate,
 									   OffsetNumber offnum, OffsetNumber rdoffnum);
 static void heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum);
+static void heap_prune_record_dead_or_unused(PruneState *prstate, OffsetNumber offnum);
 static void heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum);
 static void page_verify_redirects(Page page);
 
@@ -148,7 +151,13 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
 	{
 		PruneResult presult;
 
-		heap_page_prune(relation, buffer, vistest, &presult, NULL);
+		/*
+		 * For now, pass mark_unused_now as false regardless of whether or
+		 * not the relation has indexes, since we cannot safely determine
+		 * that during on-access pruning with the current implementation.
+		 */
+		heap_page_prune(relation, buffer, vistest, false,
+						&presult, NULL);
 
 		/*
 		 * Report the number of tuples reclaimed to pgstats. This is
@@ -193,6 +202,9 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
  * (see heap_prune_satisfies_vacuum and
  * HeapTupleSatisfiesVacuum).
  *
+ * mark_unused_now indicates whether or not dead items can be set LP_UNUSED during
+ * pruning.
+ *
  * off_loc is the offset location required by the caller to use in error
  * callback.
  *
@@ -203,6 +215,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer)
 void
 heap_page_prune(Relation relation, Buffer buffer,
 				GlobalVisState *vistest,
+				bool mark_unused_now,
 				PruneResult *presult,
 				OffsetNumber *off_loc)
 {
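
The new mark_unused_now argument is what lets a caller that knows the relation has no indexes skip the usual LP_DEAD bookkeeping. As a minimal sketch (not part of this diff; the helper name prune_one_page and its nindexes parameter are illustrative assumptions, not code from the commit), such a caller might look like:

	/*
	 * Illustrative sketch of a vacuum-side caller. When the relation has no
	 * indexes, dead item pointers will never be needed for index vacuuming,
	 * so they can be set LP_UNUSED immediately during pruning.
	 */
	static void
	prune_one_page(Relation rel, Buffer buf, GlobalVisState *vistest,
				   int nindexes)
	{
		PruneResult presult;

		heap_page_prune(rel, buf, vistest,
						nindexes == 0,	/* mark_unused_now */
						&presult, NULL);
	}

On-access pruning, by contrast, always passes false here, since heap_page_prune_opt cannot safely determine during on-access pruning whether the relation has indexes (see the comment added above).
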
@@ -227,6 +240,7 @@ heap_page_prune(Relation relation, Buffer buffer,
 	prstate.new_prune_xid = InvalidTransactionId;
 	prstate.rel = relation;
 	prstate.vistest = vistest;
+	prstate.mark_unused_now = mark_unused_now;
 	prstate.snapshotConflictHorizon = InvalidTransactionId;
 	prstate.nredirected = prstate.ndead = prstate.nunused = 0;
 	memset(prstate.marked, 0, sizeof(prstate.marked));
@@ -306,9 +320,9 @@ heap_page_prune(Relation relation, Buffer buffer,
 		if (off_loc)
 			*off_loc = offnum;
 
-		/* Nothing to do if slot is empty or already dead */
+		/* Nothing to do if slot is empty */
 		itemid = PageGetItemId(page, offnum);
-		if (!ItemIdIsUsed(itemid) || ItemIdIsDead(itemid))
+		if (!ItemIdIsUsed(itemid))
 			continue;
 
 		/* Process this item or chain of items */
@@ -581,7 +595,17 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum,
 		 * function.)
 		 */
 		if (ItemIdIsDead(lp))
+		{
+			/*
+			 * If the caller set mark_unused_now true, we can set dead line
+			 * pointers LP_UNUSED now. We don't increment ndeleted here since
+			 * the LP was already marked dead.
+			 */
+			if (unlikely(prstate->mark_unused_now))
+				heap_prune_record_unused(prstate, offnum);
+
 			break;
+		}
 
 		Assert(ItemIdIsNormal(lp));
 		htup = (HeapTupleHeader) PageGetItem(dp, lp);
@@ -715,7 +739,7 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum,
 		 * redirect the root to the correct chain member.
 		 */
 		if (i >= nchain)
-			heap_prune_record_dead(prstate, rootoffnum);
+			heap_prune_record_dead_or_unused(prstate, rootoffnum);
 		else
 			heap_prune_record_redirect(prstate, rootoffnum, chainitems[i]);
 	}
@@ -726,9 +750,9 @@ heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum,
 		 * item. This can happen if the loop in heap_page_prune caused us to
 		 * visit the dead successor of a redirect item before visiting the
 		 * redirect item. We can clean up by setting the redirect item to
-		 * DEAD state.
+		 * DEAD state or LP_UNUSED if the caller indicated.
 		 */
-		heap_prune_record_dead(prstate, rootoffnum);
+		heap_prune_record_dead_or_unused(prstate, rootoffnum);
 	}
 
 	return ndeleted;
@@ -774,6 +798,27 @@ heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum)
 	prstate->marked[offnum] = true;
 }
 
+/*
+ * Depending on whether or not the caller set mark_unused_now to true, record that a
+ * line pointer should be marked LP_DEAD or LP_UNUSED. There are other cases in
+ * which we will mark line pointers LP_UNUSED, but we will not mark line
+ * pointers LP_DEAD if mark_unused_now is true.
+ */
+static void
+heap_prune_record_dead_or_unused(PruneState *prstate, OffsetNumber offnum)
+{
+	/*
+	 * If the caller set mark_unused_now to true, we can remove dead tuples
+	 * during pruning instead of marking their line pointers dead. Set this
+	 * tuple's line pointer LP_UNUSED. We hint that this option is less
+	 * likely.
+	 */
+	if (unlikely(prstate->mark_unused_now))
+		heap_prune_record_unused(prstate, offnum);
+	else
+		heap_prune_record_dead(prstate, offnum);
+}
+
 /* Record line pointer to be marked unused */
 static void
 heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)
@@ -903,13 +948,24 @@ heap_page_prune_execute(Buffer buffer,
 #ifdef USE_ASSERT_CHECKING
 
 		/*
-		 * Only heap-only tuples can become LP_UNUSED during pruning. They
-		 * don't need to be left in place as LP_DEAD items until VACUUM gets
-		 * around to doing index vacuuming.
+		 * When heap_page_prune() was called, mark_unused_now may have been
+		 * passed as true, which allows would-be LP_DEAD items to be made
+		 * LP_UNUSED instead. This is only possible if the relation has no
+		 * indexes. If there are any dead items, then mark_unused_now was not
+		 * true and every item being marked LP_UNUSED must refer to a
+		 * heap-only tuple.
 		 */
-		Assert(ItemIdHasStorage(lp) && ItemIdIsNormal(lp));
-		htup = (HeapTupleHeader) PageGetItem(page, lp);
-		Assert(HeapTupleHeaderIsHeapOnly(htup));
+		if (ndead > 0)
+		{
+			Assert(ItemIdHasStorage(lp) && ItemIdIsNormal(lp));
+			htup = (HeapTupleHeader) PageGetItem(page, lp);
+			Assert(HeapTupleHeaderIsHeapOnly(htup));
+		}
+		else
+		{
+			Assert(ItemIdIsUsed(lp));
+		}
+
 #endif
 
 		ItemIdSetUnused(lp);