@@ -501,7 +501,7 @@ typedef struct XLogCtlData
501501 * WALBufMappingLock.
502502 */
503503 char * pages ; /* buffers for unwritten XLOG pages */
504- XLogRecPtr * xlblocks ; /* 1st byte ptr-s + XLOG_BLCKSZ */
504+ pg_atomic_uint64 * xlblocks ; /* 1st byte ptr-s + XLOG_BLCKSZ */
505505 int XLogCacheBlck ; /* highest allocated xlog buffer index */
506506
507507 /*
@@ -1636,20 +1636,16 @@ GetXLogBuffer(XLogRecPtr ptr, TimeLineID tli)
16361636 * out to disk and evicted, and the caller is responsible for making sure
16371637 * that doesn't happen.
16381638 *
1639- * However, we don't hold a lock while we read the value. If someone has
1640- * just initialized the page, it's possible that we get a "torn read" of
1641- * the XLogRecPtr if 64-bit fetches are not atomic on this platform. In
1642- * that case we will see a bogus value. That's ok, we'll grab the mapping
1643- * lock (in AdvanceXLInsertBuffer) and retry if we see anything else than
1644- * the page we're looking for. But it means that when we do this unlocked
1645- * read, we might see a value that appears to be ahead of the page we're
1646- * looking for. Don't PANIC on that, until we've verified the value while
1647- * holding the lock.
1639+ * We don't hold a lock while we read the value. If someone is just about
1640+ * to initialize or has just initialized the page, it's possible that we
1641+ * get InvalidXLogRecPtr. That's ok, we'll grab the mapping lock (in
1642+ * AdvanceXLInsertBuffer) and retry if we see anything other than the page
1643+ * we're looking for.
16481644 */
16491645 expectedEndPtr = ptr ;
16501646 expectedEndPtr += XLOG_BLCKSZ - ptr % XLOG_BLCKSZ ;
16511647
1652- endptr = XLogCtl -> xlblocks [idx ];
1648+ endptr = pg_atomic_read_u64 ( & XLogCtl -> xlblocks [idx ]) ;
16531649 if (expectedEndPtr != endptr )
16541650 {
16551651 XLogRecPtr initializedUpto ;
@@ -1680,7 +1676,7 @@ GetXLogBuffer(XLogRecPtr ptr, TimeLineID tli)
16801676 WALInsertLockUpdateInsertingAt (initializedUpto );
16811677
16821678 AdvanceXLInsertBuffer (ptr , tli , false);
1683- endptr = XLogCtl -> xlblocks [idx ];
1679+ endptr = pg_atomic_read_u64 ( & XLogCtl -> xlblocks [idx ]) ;
16841680
16851681 if (expectedEndPtr != endptr )
16861682 elog (PANIC , "could not find WAL buffer for %X/%X" ,
@@ -1867,7 +1863,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic)
18671863 * be zero if the buffer hasn't been used yet). Fall through if it's
18681864 * already written out.
18691865 */
1870- OldPageRqstPtr = XLogCtl -> xlblocks [nextidx ];
1866+ OldPageRqstPtr = pg_atomic_read_u64 ( & XLogCtl -> xlblocks [nextidx ]) ;
18711867 if (LogwrtResult .Write < OldPageRqstPtr )
18721868 {
18731869 /*
@@ -1989,8 +1985,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic)
19891985 */
19901986 pg_write_barrier ();
19911987
1992- * ((volatile XLogRecPtr * ) & XLogCtl -> xlblocks [nextidx ]) = NewPageEndPtr ;
1993-
1988+ pg_atomic_write_u64 (& XLogCtl -> xlblocks [nextidx ], NewPageEndPtr );
19941989 XLogCtl -> InitializedUpTo = NewPageEndPtr ;
19951990
19961991 npages ++ ;
@@ -2208,7 +2203,7 @@ XLogWrite(XLogwrtRqst WriteRqst, TimeLineID tli, bool flexible)
22082203 * if we're passed a bogus WriteRqst.Write that is past the end of the
22092204 * last page that's been initialized by AdvanceXLInsertBuffer.
22102205 */
2211- XLogRecPtr EndPtr = XLogCtl -> xlblocks [curridx ];
2206+ XLogRecPtr EndPtr = pg_atomic_read_u64 ( & XLogCtl -> xlblocks [curridx ]) ;
22122207
22132208 if (LogwrtResult .Write >= EndPtr )
22142209 elog (PANIC , "xlog write request %X/%X is past end of log %X/%X" ,
@@ -4632,7 +4627,7 @@ XLOGShmemSize(void)
46324627 /* WAL insertion locks, plus alignment */
46334628 size = add_size (size , mul_size (sizeof (WALInsertLockPadded ), NUM_XLOGINSERT_LOCKS + 1 ));
46344629 /* xlblocks array */
4635- size = add_size (size , mul_size (sizeof (XLogRecPtr ), XLOGbuffers ));
4630+ size = add_size (size , mul_size (sizeof (pg_atomic_uint64 ), XLOGbuffers ));
46364631 /* extra alignment padding for XLOG I/O buffers */
46374632 size = add_size (size , Max (XLOG_BLCKSZ , PG_IO_ALIGN_SIZE ));
46384633 /* and the buffers themselves */
@@ -4710,10 +4705,13 @@ XLOGShmemInit(void)
47104705 * needed here.
47114706 */
47124707 allocptr = ((char * ) XLogCtl ) + sizeof (XLogCtlData );
4713- XLogCtl -> xlblocks = (XLogRecPtr * ) allocptr ;
4714- memset (XLogCtl -> xlblocks , 0 , sizeof (XLogRecPtr ) * XLOGbuffers );
4715- allocptr += sizeof (XLogRecPtr ) * XLOGbuffers ;
4708+ XLogCtl -> xlblocks = (pg_atomic_uint64 * ) allocptr ;
4709+ allocptr += sizeof (pg_atomic_uint64 ) * XLOGbuffers ;
47164710
4711+ for (i = 0 ; i < XLOGbuffers ; i ++ )
4712+ {
4713+ pg_atomic_init_u64 (& XLogCtl -> xlblocks [i ], InvalidXLogRecPtr );
4714+ }
47174715
47184716 /* WAL insertion locks. Ensure they're aligned to the full padded size */
47194717 allocptr += sizeof (WALInsertLockPadded ) -
@@ -5750,7 +5748,7 @@ StartupXLOG(void)
57505748 memcpy (page , endOfRecoveryInfo -> lastPage , len );
57515749 memset (page + len , 0 , XLOG_BLCKSZ - len );
57525750
5753- XLogCtl -> xlblocks [firstIdx ] = endOfRecoveryInfo -> lastPageBeginPtr + XLOG_BLCKSZ ;
5751+ pg_atomic_write_u64 ( & XLogCtl -> xlblocks [firstIdx ], endOfRecoveryInfo -> lastPageBeginPtr + XLOG_BLCKSZ ) ;
57545752 XLogCtl -> InitializedUpTo = endOfRecoveryInfo -> lastPageBeginPtr + XLOG_BLCKSZ ;
57555753 }
57565754 else