@@ -208,7 +208,7 @@ typedef struct LVShared
208208 * live tuples in the index vacuum case or the new live tuples in the
209209 * index cleanup case.
210210 *
211- * estimated_count is true if the reltuples is an estimated value.
211+ * estimated_count is true if reltuples is an estimated value.
212212 */
213213 double reltuples ;
214214 bool estimated_count ;
@@ -232,8 +232,8 @@ typedef struct LVShared
232232
233233 /*
234234 * Number of active parallel workers. This is used for computing the
235- * minimum threshold of the vacuum cost balance for a worker to go for the
236- * delay.
235+ * minimum threshold of the vacuum cost balance before a worker sleeps for
236+ * cost-based delay.
237237 */
238238 pg_atomic_uint32 active_nworkers ;
239239
@@ -732,7 +732,7 @@ vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
732732 * to reclaim dead line pointers.
733733 *
734734 * If the table has at least two indexes, we execute both index vacuum
735- * and index cleanup with parallel workers unless the parallel vacuum is
735+ * and index cleanup with parallel workers unless parallel vacuum is
736736 * disabled. In a parallel vacuum, we enter parallel mode and then
737737 * create both the parallel context and the DSM segment before starting
738738 * heap scan so that we can record dead tuples to the DSM segment. All
@@ -809,8 +809,8 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
809809 vacrelstats -> latestRemovedXid = InvalidTransactionId ;
810810
811811 /*
812- * Initialize the state for a parallel vacuum. As of now, only one worker
813- * can be used for an index, so we invoke parallelism only if there are at
812+ * Initialize state for a parallel vacuum. As of now, only one worker can
813+ * be used for an index, so we invoke parallelism only if there are at
814814 * least two indexes on a table.
815815 */
816816 if (params -> nworkers >= 0 && vacrelstats -> useindex && nindexes > 1 )
@@ -837,7 +837,7 @@ lazy_scan_heap(Relation onerel, VacuumParams *params, LVRelStats *vacrelstats,
837837 }
838838
839839 /*
840- * Allocate the space for dead tuples in case the parallel vacuum is not
840+ * Allocate the space for dead tuples in case parallel vacuum is not
841841 * initialized.
842842 */
843843 if (!ParallelVacuumIsActive (lps ))
@@ -2215,7 +2215,7 @@ parallel_vacuum_index(Relation *Irel, IndexBulkDeleteResult **stats,
22152215 shared_indstats = get_indstats (lvshared , idx );
22162216
22172217 /*
2218- * Skip processing indexes that doesn 't participate in parallel
 2218+ * Skip processing indexes that don't participate in parallel
22192219 * operation
22202220 */
22212221 if (shared_indstats == NULL ||
@@ -2312,12 +2312,12 @@ vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
23122312
23132313 /*
23142314 * Copy the index bulk-deletion result returned from ambulkdelete and
2315- * amvacuumcleanup to the DSM segment if it's the first time to get it
2316- * from them, because they allocate it locally and it's possible that an
2317- * index will be vacuumed by the different vacuum process at the next
2318- * time. The copying of the result normally happens only after the first
2319- * time of index vacuuming. From the second time, we pass the result on
2320- * the DSM segment so that they then update it directly.
2315+ * amvacuumcleanup to the DSM segment if it's the first cycle because they
2316+ * allocate locally and it's possible that an index will be vacuumed by a
2317+ * different vacuum process the next cycle. Copying the result normally
2318+ * happens only the first time an index is vacuumed. For any additional
2319+ * vacuum pass, we directly point to the result on the DSM segment and
2320+ * pass it to vacuum index APIs so that workers can update it directly.
23212321 *
23222322 * Since all vacuum workers write the bulk-deletion result at different
23232323 * slots we can write them without locking.
@@ -2328,8 +2328,8 @@ vacuum_one_index(Relation indrel, IndexBulkDeleteResult **stats,
23282328 shared_indstats -> updated = true;
23292329
23302330 /*
2331- * Now that the stats[idx] points to the DSM segment, we don't need
2332- * the locally allocated results.
2331+ * Now that stats[idx] points to the DSM segment, we don't need the
2332+ * locally allocated results.
23332333 */
23342334 pfree (* stats );
23352335 * stats = bulkdelete_res ;
@@ -2449,7 +2449,7 @@ lazy_vacuum_index(Relation indrel, IndexBulkDeleteResult **stats,
24492449 * lazy_cleanup_index() -- do post-vacuum cleanup for one index relation.
24502450 *
24512451 * reltuples is the number of heap tuples and estimated_count is true
2452- * if the reltuples is an estimated value.
2452+ * if reltuples is an estimated value.
24532453 */
24542454static void
24552455lazy_cleanup_index (Relation indrel ,
@@ -3050,9 +3050,9 @@ heap_page_is_all_visible(Relation rel, Buffer buf,
30503050/*
30513051 * Compute the number of parallel worker processes to request. Both index
30523052 * vacuum and index cleanup can be executed with parallel workers. The index
3053- * is eligible for parallel vacuum iff it's size is greater than
3053+ * is eligible for parallel vacuum iff its size is greater than
30543054 * min_parallel_index_scan_size as invoking workers for very small indexes
3055- * can hurt the performance.
3055+ * can hurt performance.
30563056 *
30573057 * nrequested is the number of parallel workers that user requested. If
30583058 * nrequested is 0, we compute the parallel degree based on nindexes, that is
@@ -3071,7 +3071,7 @@ compute_parallel_vacuum_workers(Relation *Irel, int nindexes, int nrequested,
30713071 int i ;
30723072
30733073 /*
3074- * We don't allow to perform parallel operation in standalone backend or
 3074+ * We don't allow performing a parallel operation in a standalone backend or
30753075 * when parallelism is disabled.
30763076 */
30773077 if (!IsUnderPostmaster || max_parallel_maintenance_workers == 0 )
@@ -3138,13 +3138,13 @@ prepare_index_statistics(LVShared *lvshared, bool *can_parallel_vacuum,
31383138 if (!can_parallel_vacuum [i ])
31393139 continue ;
31403140
3141- /* Set NOT NULL as this index do support parallelism */
3141+ /* Set NOT NULL as this index does support parallelism */
31423142 lvshared -> bitmap [i >> 3 ] |= 1 << (i & 0x07 );
31433143 }
31443144}
31453145
31463146/*
3147- * Update index statistics in pg_class if the statistics is accurate.
3147+ * Update index statistics in pg_class if the statistics are accurate.
31483148 */
31493149static void
31503150update_index_statistics (Relation * Irel , IndexBulkDeleteResult * * stats ,
@@ -3174,7 +3174,7 @@ update_index_statistics(Relation *Irel, IndexBulkDeleteResult **stats,
31743174
31753175/*
31763176 * This function prepares and returns parallel vacuum state if we can launch
3177- * even one worker. This function is responsible to enter parallel mode,
 3177+ * even one worker. This function is responsible for entering parallel mode,
 31783178 * creating a parallel context, and then initializing the DSM segment.
31793179 */
31803180static LVParallelState *
@@ -3345,8 +3345,8 @@ begin_parallel_vacuum(Oid relid, Relation *Irel, LVRelStats *vacrelstats,
33453345/*
33463346 * Destroy the parallel context, and end parallel mode.
33473347 *
3348- * Since writes are not allowed during the parallel mode, so we copy the
3349- * updated index statistics from DSM in local memory and then later use that
3348+ * Since writes are not allowed during parallel mode, copy the
3349+ * updated index statistics from DSM into local memory and then later use that
33503350 * to update the index statistics. One might think that we can exit from
33513351 * parallel mode, update the index statistics and then destroy parallel
33523352 * context, but that won't be safe (see ExitParallelMode).
@@ -3452,7 +3452,7 @@ skip_parallel_vacuum_index(Relation indrel, LVShared *lvshared)
34523452 * Perform work within a launched parallel process.
34533453 *
34543454 * Since parallel vacuum workers perform only index vacuum or index cleanup,
3455- * we don't need to report the progress information.
3455+ * we don't need to report progress information.
34563456 */
34573457void
34583458parallel_vacuum_main (dsm_segment * seg , shm_toc * toc )
0 commit comments