@@ -315,14 +315,6 @@ ProcessSyncRequests(void)
     {
         int         failures;
 
-        /*
-         * If fsync is off then we don't have to bother opening the file at
-         * all. (We delay checking until this point so that changing fsync on
-         * the fly behaves sensibly.)
-         */
-        if (!enableFsync)
-            continue;
-
         /*
          * If the entry is new then don't process it this time; it is new.
          * Note "continue" bypasses the hash-remove call at the bottom of the
@@ -335,78 +327,88 @@ ProcessSyncRequests(void)
         Assert((CycleCtr) (entry->cycle_ctr + 1) == sync_cycle_ctr);
 
         /*
-         * If in checkpointer, we want to absorb pending requests every so
-         * often to prevent overflow of the fsync request queue. It is
-         * unspecified whether newly-added entries will be visited by
-         * hash_seq_search, but we don't care since we don't need to process
-         * them anyway.
-         */
-        if (--absorb_counter <= 0)
-        {
-            AbsorbSyncRequests();
-            absorb_counter = FSYNCS_PER_ABSORB;
-        }
-
-        /*
-         * The fsync table could contain requests to fsync segments that have
-         * been deleted (unlinked) by the time we get to them. Rather than
-         * just hoping an ENOENT (or EACCES on Windows) error can be ignored,
-         * what we do on error is absorb pending requests and then retry.
-         * Since mdunlink() queues a "cancel" message before actually
-         * unlinking, the fsync request is guaranteed to be marked canceled
-         * after the absorb if it really was this case. DROP DATABASE likewise
-         * has to tell us to forget fsync requests before it starts deletions.
+         * If fsync is off then we don't have to bother opening the file at
+         * all. (We delay checking until this point so that changing fsync on
+         * the fly behaves sensibly.)
          */
-        for (failures = 0; !entry->canceled; failures++)
+        if (enableFsync)
         {
-            char        path[MAXPGPATH];
-
-            INSTR_TIME_SET_CURRENT(sync_start);
-            if (syncsw[entry->tag.handler].sync_syncfiletag(&entry->tag,
-                                                            path) == 0)
-            {
-                /* Success; update statistics about sync timing */
-                INSTR_TIME_SET_CURRENT(sync_end);
-                sync_diff = sync_end;
-                INSTR_TIME_SUBTRACT(sync_diff, sync_start);
-                elapsed = INSTR_TIME_GET_MICROSEC(sync_diff);
-                if (elapsed > longest)
-                    longest = elapsed;
-                total_elapsed += elapsed;
-                processed++;
-
-                if (log_checkpoints)
-                    elog(DEBUG1, "checkpoint sync: number=%d file=%s time=%.3f msec",
-                         processed,
-                         path,
-                         (double) elapsed / 1000);
-
-                break;          /* out of retry loop */
-            }
-
             /*
-             * It is possible that the relation has been dropped or truncated
-             * since the fsync request was entered. Therefore, allow ENOENT,
-             * but only if we didn't fail already on this file.
+             * If in checkpointer, we want to absorb pending requests every so
+             * often to prevent overflow of the fsync request queue. It is
+             * unspecified whether newly-added entries will be visited by
+             * hash_seq_search, but we don't care since we don't need to
+             * process them anyway.
              */
-            if (!FILE_POSSIBLY_DELETED(errno) || failures > 0)
-                ereport(data_sync_elevel(ERROR),
-                        (errcode_for_file_access(),
-                         errmsg("could not fsync file \"%s\": %m",
-                                path)));
-            else
-                ereport(DEBUG1,
-                        (errcode_for_file_access(),
-                         errmsg("could not fsync file \"%s\" but retrying: %m",
-                                path)));
+            if (--absorb_counter <= 0)
+            {
+                AbsorbSyncRequests();
+                absorb_counter = FSYNCS_PER_ABSORB;
+            }
 
             /*
-             * Absorb incoming requests and check to see if a cancel arrived
-             * for this relation fork.
+             * The fsync table could contain requests to fsync segments that
+             * have been deleted (unlinked) by the time we get to them. Rather
+             * than just hoping an ENOENT (or EACCES on Windows) error can be
+             * ignored, what we do on error is absorb pending requests and
+             * then retry. Since mdunlink() queues a "cancel" message before
+             * actually unlinking, the fsync request is guaranteed to be
+             * marked canceled after the absorb if it really was this case.
+             * DROP DATABASE likewise has to tell us to forget fsync requests
+             * before it starts deletions.
              */
-            AbsorbSyncRequests();
-            absorb_counter = FSYNCS_PER_ABSORB;     /* might as well... */
-        }                       /* end retry loop */
+            for (failures = 0; !entry->canceled; failures++)
+            {
+                char        path[MAXPGPATH];
+
+                INSTR_TIME_SET_CURRENT(sync_start);
+                if (syncsw[entry->tag.handler].sync_syncfiletag(&entry->tag,
+                                                                path) == 0)
+                {
+                    /* Success; update statistics about sync timing */
+                    INSTR_TIME_SET_CURRENT(sync_end);
+                    sync_diff = sync_end;
+                    INSTR_TIME_SUBTRACT(sync_diff, sync_start);
+                    elapsed = INSTR_TIME_GET_MICROSEC(sync_diff);
+                    if (elapsed > longest)
+                        longest = elapsed;
+                    total_elapsed += elapsed;
+                    processed++;
+
+                    if (log_checkpoints)
+                        elog(DEBUG1, "checkpoint sync: number=%d file=%s time=%.3f msec",
+                             processed,
+                             path,
+                             (double) elapsed / 1000);
+
+                    break;      /* out of retry loop */
+                }
+
+                /*
+                 * It is possible that the relation has been dropped or
+                 * truncated since the fsync request was entered. Therefore,
+                 * allow ENOENT, but only if we didn't fail already on this
+                 * file.
+                 */
+                if (!FILE_POSSIBLY_DELETED(errno) || failures > 0)
+                    ereport(data_sync_elevel(ERROR),
+                            (errcode_for_file_access(),
+                             errmsg("could not fsync file \"%s\": %m",
+                                    path)));
+                else
+                    ereport(DEBUG1,
+                            (errcode_for_file_access(),
+                             errmsg("could not fsync file \"%s\" but retrying: %m",
+                                    path)));
+
+                /*
+                 * Absorb incoming requests and check to see if a cancel
+                 * arrived for this relation fork.
+                 */
+                AbsorbSyncRequests();
+                absorb_counter = FSYNCS_PER_ABSORB; /* might as well... */
+            }                   /* end retry loop */
+        }
 
         /* We are done with this entry, remove it */
         if (hash_search(pendingOps, &entry->tag, HASH_REMOVE, NULL) == NULL)
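
For reference, below is a small standalone sketch of the retry-and-absorb pattern used by the retry loop added above. It is an illustrative sketch only: PendingEntry, sync_file(), and absorb_requests() are stand-ins for the pendingOps entry, sync_syncfiletag(), and AbsorbSyncRequests(), not the real sync.c API. It shows an fsync failure on a possibly-unlinked file being tolerated once; absorbing pending requests can mark the entry canceled, so the loop exits cleanly instead of reporting an error.

/*
 * Sketch of the retry-and-absorb pattern: tolerate one failure on a file
 * that may already have been unlinked, absorb pending requests (which may
 * cancel the entry), and retry; only a second failure is fatal.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct PendingEntry
{
    bool        canceled;       /* set when a cancel request is absorbed */
} PendingEntry;

/* stand-in for sync_syncfiletag(): pretend the file has been unlinked */
static int
sync_file(char *path, size_t pathlen)
{
    snprintf(path, pathlen, "base/1/1234");
    errno = ENOENT;
    return -1;
}

/* stand-in for AbsorbSyncRequests(): a cancel arrives for this entry */
static void
absorb_requests(PendingEntry *entry)
{
    entry->canceled = true;
}

int
main(void)
{
    PendingEntry entry = {false};
    char        path[1024];
    int         failures;

    for (failures = 0; !entry.canceled; failures++)
    {
        if (sync_file(path, sizeof(path)) == 0)
            break;              /* success: out of retry loop */

        if (errno != ENOENT || failures > 0)
        {
            fprintf(stderr, "could not fsync file \"%s\"\n", path);
            return 1;
        }

        printf("could not fsync file \"%s\" but retrying\n", path);
        absorb_requests(&entry);
    }

    printf("entry was %s\n", entry.canceled ? "canceled; skipping it" : "synced");
    return 0;
}

In the real code this is why an ENOENT can be ignored only after an absorb has had a chance to deliver the cancel message that mdunlink() queues before unlinking, as the comment in the hunk above explains.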