@@ -143,6 +143,7 @@ typedef struct ProcState
143143 int nextMsgNum ; /* next message number to read */
144144 bool resetState ; /* backend needs to reset its state */
145145 bool signaled ; /* backend has been sent catchup signal */
146+ bool hasMessages ; /* backend has unread messages */
146147
147148 /*
148149 * Backend only sends invalidations, never receives them. This only makes
@@ -248,6 +249,7 @@ CreateSharedInvalidationState(void)
248249 shmInvalBuffer -> procState [i ].nextMsgNum = 0 ; /* meaningless */
249250 shmInvalBuffer -> procState [i ].resetState = false;
250251 shmInvalBuffer -> procState [i ].signaled = false;
252+ shmInvalBuffer -> procState [i ].hasMessages = false;
251253 shmInvalBuffer -> procState [i ].nextLXID = InvalidLocalTransactionId ;
252254 }
253255}
@@ -264,11 +266,9 @@ SharedInvalBackendInit(bool sendOnly)
264266 SISeg * segP = shmInvalBuffer ;
265267
266268 /*
267- * This can run in parallel with read operations, and for that matter with
268- * write operations; but not in parallel with additions and removals of
269- * backends, nor in parallel with SICleanupQueue. It doesn't seem worth
270- * having a third lock, so we choose to use SInvalWriteLock to serialize
271- * additions/removals.
269+ * This can run in parallel with read operations, but not with write
270+ * operations, since SIInsertDataEntries relies on lastBackend to set
271+ * hasMessages appropriately.
272272 */
273273 LWLockAcquire (SInvalWriteLock , LW_EXCLUSIVE );
274274
@@ -316,6 +316,7 @@ SharedInvalBackendInit(bool sendOnly)
316316 stateP -> nextMsgNum = segP -> maxMsgNum ;
317317 stateP -> resetState = false;
318318 stateP -> signaled = false;
319+ stateP -> hasMessages = false;
319320 stateP -> sendOnly = sendOnly ;
320321
321322 LWLockRelease (SInvalWriteLock );
@@ -417,6 +418,7 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
417418 int nthistime = Min (n , WRITE_QUANTUM );
418419 int numMsgs ;
419420 int max ;
421+ int i ;
420422
421423 n -= nthistime ;
422424
@@ -459,6 +461,19 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
459461 SpinLockRelease (& vsegP -> msgnumLock );
460462 }
461463
464+ /*
465+ * Now that the maxMsgNum change is globally visible, we give
466+ * everyone a swift kick to make sure they read the newly added
467+ * messages. Releasing SInvalWriteLock will enforce a full memory
468+ * barrier, so these (unlocked) changes will be committed to memory
469+ * before we exit the function.
470+ */
471+ for (i = 0 ; i < segP -> lastBackend ; i ++ )
472+ {
473+ ProcState * stateP = & segP -> procState [i ];
474+ stateP -> hasMessages = TRUE;
475+ }
476+
462477 LWLockRelease (SInvalWriteLock );
463478 }
464479}
@@ -499,11 +514,36 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
499514 int max ;
500515 int n ;
501516
502- LWLockAcquire (SInvalReadLock , LW_SHARED );
503-
504517 segP = shmInvalBuffer ;
505518 stateP = & segP -> procState [MyBackendId - 1 ];
506519
520+ /*
521+ * Before starting to take locks, do a quick, unlocked test to see whether
522+ * there can possibly be anything to read. On a multiprocessor system,
523+ * it's possible that this load could migrate backwards and occur before we
524+ * actually enter this function, so we might miss a sinval message that
525+ * was just added by some other processor. But they can't migrate
526+ * backwards over a preceding lock acquisition, so it should be OK. If
527+ * we haven't acquired a lock preventing further relevant
528+ * invalidations, any such occurrence is not much different than if the
529+ * invalidation had arrived slightly later in the first place.
530+ */
531+ if (!stateP -> hasMessages )
532+ return 0 ;
533+
534+ LWLockAcquire (SInvalReadLock , LW_SHARED );
535+
536+ /*
537+ * We must reset hasMessages before determining how many messages we're
538+ * going to read. That way, if new messages arrive after we have
539+ * determined how many we're reading, the flag will get reset and we'll
540+ * notice those messages part-way through.
541+ *
542+ * Note that, if we don't end up reading all of the messages, we had
543+ * better be certain to reset this flag before exiting!
544+ */
545+ stateP -> hasMessages = FALSE;
546+
507547 /* Fetch current value of maxMsgNum using spinlock */
508548 {
509549 /* use volatile pointer to prevent code rearrangement */
@@ -544,10 +584,16 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
544584 }
545585
546586 /*
547- * Reset our "signaled" flag whenever we have caught up completely.
587+ * If we have caught up completely, reset our "signaled" flag so that
588+ * we'll get another signal if we fall behind again.
589+ *
590+ * If we haven't caught up completely, reset the hasMessages flag so that
591+ * we see the remaining messages next time.
548592 */
549593 if (stateP -> nextMsgNum >= max )
550594 stateP -> signaled = false;
595+ else
596+ stateP -> hasMessages = TRUE;
551597
552598 LWLockRelease (SInvalReadLock );
553599 return n ;
0 commit comments