/*-------------------------------------------------------------------------
 *
 * visibilitymap.c
 *    bitmap for tracking visibility of heap tuples
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/access/heap/visibilitymap.c
 *
 * INTERFACE ROUTINES
 *    visibilitymap_clear  - clear bits for one page in the visibility map
 *    visibilitymap_pin    - pin a map page for setting a bit
 *    visibilitymap_pin_ok - check whether correct map page is already pinned
 *    visibilitymap_set    - set bit(s) in a previously pinned page and log
 *    visibilitymap_set_vmbits - set bit(s) in a pinned page
 *    visibilitymap_get_status - get status of bits
 *    visibilitymap_count  - count number of bits set in visibility map
 *    visibilitymap_prepare_truncate -
 *        prepare for truncation of the visibility map
 *
 * NOTES
 *
 * The visibility map is a bitmap with two bits (all-visible and all-frozen)
 * per heap page. A set all-visible bit means that all tuples on the page are
 * known visible to all transactions, and therefore the page doesn't need to
 * be vacuumed. A set all-frozen bit means that all tuples on the page are
 * completely frozen, and therefore the page doesn't need to be vacuumed even
 * if a whole-table scanning vacuum is required (e.g., an anti-wraparound
 * vacuum). The all-frozen bit must be set only when the page is already
 * all-visible.
 *
 * The map is conservative in the sense that we make sure that whenever a bit
 * is set, we know the condition is true, but if a bit is not set, it might or
 * might not be true.
 *
 * Clearing visibility map bits is not separately WAL-logged. The callers
 * must make sure that whenever a bit is cleared, the bit is cleared on WAL
 * replay of the updating operation as well.
 *
 * When we *set* a visibility map bit during VACUUM, we must write WAL. This
 * may seem counterintuitive, since the bit is basically a hint: if it is
 * clear, it may still be the case that every tuple on the page is visible to
 * all transactions; we just don't know that for certain. The difficulty is
 * that there are two bits which are typically set together: the
 * PD_ALL_VISIBLE bit on the page itself, and the visibility map bit. If a
 * crash occurs after the visibility map page makes it to disk and before the
 * updated heap page makes it to disk, redo must set the bit on the heap
 * page. Otherwise, the next insert, update, or delete on the heap page will
 * fail to realize that the visibility map bit must be cleared, possibly
 * causing index-only scans to return wrong answers.
 *
 * VACUUM will normally skip pages for which the visibility map bit is set;
 * such pages can't contain any dead tuples and therefore don't need
 * vacuuming.
 *
 * LOCKING
 *
 * In heapam.c, whenever a page is modified so that not all tuples on the
 * page are visible to everyone anymore, the corresponding bit in the
 * visibility map is cleared. In order to be crash-safe, we need to do this
 * while still holding a lock on the heap page and in the same critical
 * section that logs the page modification. However, we don't want to hold
 * the buffer lock over any I/O that may be required to read in the visibility
 * map page. To avoid this, we examine the heap page before locking it;
 * if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map
 * page. Then, we lock the buffer. But this creates a race condition: there
 * is a possibility that in the time it takes to lock the buffer, the
 * PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the
 * buffer, pin the visibility map page, and relock the buffer. This shouldn't
 * happen often, because only VACUUM currently sets visibility map bits,
 * and the race will only occur if VACUUM processes a given page at almost
 * exactly the same time that someone tries to further modify it.
 *
 * To set a bit, you need to hold a lock on the heap page. That prevents
 * the race condition where VACUUM sees that all tuples on the page are
 * visible to everyone, but another backend modifies the page before VACUUM
 * sets the bit in the visibility map.
 *
 * When a bit is set, the LSN of the visibility map page is updated to make
 * sure that the visibility map update doesn't get written to disk before the
 * WAL record of the changes that made it possible to set the bit is flushed.
 * But when a bit is cleared, we don't have to do that because it's always
 * safe to clear a bit in the map from a correctness point of view.
 *
 *-------------------------------------------------------------------------
 */
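/*
 * Illustrative sketch (not part of the original file) of the clear-side
 * protocol described under LOCKING above, roughly as a heapam.c-style
 * caller would follow it; "buffer", "blkno", and "vmbuf" are placeholder
 * names, and error handling is elided:
 *
 *		vmbuf = InvalidBuffer;
 *		page = BufferGetPage(buffer);
 *		if (PageIsAllVisible(page))					-- peek before locking
 *			visibilitymap_pin(rel, blkno, &vmbuf);	-- may do I/O
 *		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 *		if (PageIsAllVisible(page) && !visibilitymap_pin_ok(blkno, vmbuf))
 *		{
 *			-- lost the race: unlock, pin the map page, relock
 *			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 *			visibilitymap_pin(rel, blkno, &vmbuf);
 *			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 *		}
 *		... modify the page, and inside the critical section that logs
 *		... the change, call visibilitymap_clear(rel, blkno, vmbuf, ...)
 */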
#include "postgres.h"

#include "access/heapam_xlog.h"
#include "access/visibilitymap.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "port/pg_bitutils.h"
#include "storage/bufmgr.h"
#include "storage/smgr.h"
#include "utils/inval.h"
#include "utils/rel.h"

/*#define TRACE_VISIBILITYMAP */

/*
 * Size of the bitmap on each visibility map page, in bytes. There are no
 * extra headers, so the whole page minus the standard page header is
 * used for the bitmap.
 */
#define MAPSIZE (BLCKSZ - MAXALIGN(SizeOfPageHeaderData))

/* Number of heap blocks we can represent in one byte */
#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / BITS_PER_HEAPBLOCK)

/* Number of heap blocks we can represent in one visibility map page. */
#define HEAPBLOCKS_PER_PAGE (MAPSIZE * HEAPBLOCKS_PER_BYTE)

/* Mapping from heap block number to the right bit in the visibility map */
#define HEAPBLK_TO_MAPBLOCK(x) ((x) / HEAPBLOCKS_PER_PAGE)
#define HEAPBLK_TO_MAPBYTE(x) (((x) % HEAPBLOCKS_PER_PAGE) / HEAPBLOCKS_PER_BYTE)
#define HEAPBLK_TO_OFFSET(x) (((x) % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK)

/* Masks for counting subsets of bits in the visibility map. */
#define VISIBLE_MASK8 (0x55)	/* The lower bit of each bit pair */
#define FROZEN_MASK8 (0xaa)		/* The upper bit of each bit pair */
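
/*
 * Worked example (illustrative, not part of the original file): with the
 * default BLCKSZ of 8192 and a 24-byte page header, MAPSIZE is 8168 bytes,
 * so HEAPBLOCKS_PER_BYTE is 4 and HEAPBLOCKS_PER_PAGE is 32672. For heap
 * block 100000:
 *
 *		HEAPBLK_TO_MAPBLOCK(100000) = 100000 / 32672       = 3
 *		HEAPBLK_TO_MAPBYTE(100000)  = (100000 % 32672) / 4 = 1984 / 4 = 496
 *		HEAPBLK_TO_OFFSET(100000)   = (100000 % 4) * 2     = 0
 *
 * i.e. the two bits for heap block 100000 are the low-order bit pair of
 * byte 496 on the fourth visibility map page.
 */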

/* prototypes for internal routines */
static Buffer vm_readbuf(Relation rel, BlockNumber blkno, bool extend);
static Buffer vm_extend(Relation rel, BlockNumber vm_nblocks);


/*
 * visibilitymap_clear - clear specified bits for one page in visibility map
 *
 * You must pass a buffer containing the correct map page to this function.
 * Call visibilitymap_pin first to pin the right one. This function doesn't do
 * any I/O. Returns true if any bits have been cleared and false otherwise.
 */
bool
visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
    int         mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
    int         mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
    uint8       mask = flags << mapOffset;
    char       *map;
    bool        cleared = false;

    /* Must never clear all_visible bit while leaving all_frozen bit set */
    Assert(flags & VISIBILITYMAP_VALID_BITS);
    Assert(flags != VISIBILITYMAP_ALL_VISIBLE);

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_clear %s %d", RelationGetRelationName(rel), heapBlk);
#endif

    if (!BufferIsValid(vmbuf) || BufferGetBlockNumber(vmbuf) != mapBlock)
        elog(ERROR, "wrong buffer passed to visibilitymap_clear");

    LockBuffer(vmbuf, BUFFER_LOCK_EXCLUSIVE);
    map = PageGetContents(BufferGetPage(vmbuf));

    if (map[mapByte] & mask)
    {
        map[mapByte] &= ~mask;

        MarkBufferDirty(vmbuf);
        cleared = true;
    }

    LockBuffer(vmbuf, BUFFER_LOCK_UNLOCK);

    return cleared;
}

/*
 * visibilitymap_pin - pin a map page for setting a bit
 *
 * Setting a bit in the visibility map is a two-phase operation. First, call
 * visibilitymap_pin, to pin the visibility map page containing the bit for
 * the heap page. Because that can require I/O to read the map page, you
 * shouldn't hold a lock on the heap page while doing that. Then, call
 * visibilitymap_set to actually set the bit.
 *
 * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by
 * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. On return, *vmbuf is a valid buffer with the map page containing
 * the bit for heapBlk.
 *
 * If the page doesn't exist in the map file yet, it is extended.
 */
void
visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);

    /* Reuse the old pinned buffer if possible */
    if (BufferIsValid(*vmbuf))
    {
        if (BufferGetBlockNumber(*vmbuf) == mapBlock)
            return;

        ReleaseBuffer(*vmbuf);
    }
    *vmbuf = vm_readbuf(rel, mapBlock, true);
}
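
/*
 * Illustrative sketch (not part of the original file) of the two-phase
 * protocol above, roughly as a VACUUM-style caller would use it; the
 * surrounding loop and error handling are elided, and "heapBuf" is assumed
 * to be the pinned heap page being processed:
 *
 *		Buffer		vmbuf = InvalidBuffer;
 *
 *		-- Phase 1: pin the map page; may do I/O, so no heap lock held yet
 *		visibilitymap_pin(rel, heapBlk, &vmbuf);
 *
 *		-- Phase 2: lock the heap page, set PD_ALL_VISIBLE, then the VM bit
 *		LockBuffer(heapBuf, BUFFER_LOCK_EXCLUSIVE);
 *		PageSetAllVisible(BufferGetPage(heapBuf));
 *		MarkBufferDirty(heapBuf);
 *		visibilitymap_set(rel, heapBlk, heapBuf, InvalidXLogRecPtr,
 *						  vmbuf, cutoff_xid, VISIBILITYMAP_ALL_VISIBLE);
 *		LockBuffer(heapBuf, BUFFER_LOCK_UNLOCK);
 *
 *		ReleaseBuffer(vmbuf);
 */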

/*
 * visibilitymap_pin_ok - do we already have the correct page pinned?
 *
 * On entry, vmbuf should be InvalidBuffer or a valid buffer returned by
 * an earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. The return value indicates whether the buffer covers the
 * given heapBlk.
 */
bool
visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);

    return BufferIsValid(vmbuf) && BufferGetBlockNumber(vmbuf) == mapBlock;
}

/*
 * visibilitymap_set - set bit(s) on a previously pinned page
 *
 * recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
 * or InvalidXLogRecPtr in normal running. The VM page LSN is advanced to the
 * one provided; in normal running, we generate a new XLOG record and set the
 * page LSN to that value (though the heap page's LSN may *not* be updated;
 * see below). cutoff_xid is the largest xmin on the page being marked
 * all-visible; it is needed for Hot Standby, and can be InvalidTransactionId
 * if the page contains no tuples. It can also be set to InvalidTransactionId
 * when a page that is already all-visible is being marked all-frozen.
 *
 * Caller is expected to set the heap page's PD_ALL_VISIBLE bit before calling
 * this function. Except in recovery, caller should also pass the heap
 * buffer. When checksums are enabled and we're not in recovery, we must add
 * the heap buffer to the WAL chain to protect it from being torn.
 *
 * You must pass a buffer containing the correct map page to this function.
 * Call visibilitymap_pin first to pin the right one. This function doesn't do
 * any I/O.
 *
 * Returns the state of the page's VM bits before setting flags.
 */
uint8
visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
                  XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid,
                  uint8 flags)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
    uint32      mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
    uint8       mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
    Page        page;
    uint8      *map;
    uint8       status;

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_set flags 0x%02X for %s %d",
         flags, RelationGetRelationName(rel), heapBlk);
#endif

    Assert(InRecovery || !XLogRecPtrIsValid(recptr));
    Assert(InRecovery || BufferIsValid(heapBuf));
    Assert((flags & VISIBILITYMAP_VALID_BITS) == flags);

    /* Must never set all_frozen bit without also setting all_visible bit */
    Assert(flags != VISIBILITYMAP_ALL_FROZEN);

    /* Check that we have the right heap page pinned, if present */
    if (BufferIsValid(heapBuf) && BufferGetBlockNumber(heapBuf) != heapBlk)
        elog(ERROR, "wrong heap buffer passed to visibilitymap_set");

    Assert(!BufferIsValid(heapBuf) ||
           PageIsAllVisible(BufferGetPage(heapBuf)));

    /* Check that we have the right VM page pinned */
    if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
        elog(ERROR, "wrong VM buffer passed to visibilitymap_set");

    page = BufferGetPage(vmBuf);
    map = (uint8 *) PageGetContents(page);
    LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);

    status = (map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS;
    if (flags != status)
    {
        START_CRIT_SECTION();

        map[mapByte] |= (flags << mapOffset);
        MarkBufferDirty(vmBuf);

        if (RelationNeedsWAL(rel))
        {
            if (!XLogRecPtrIsValid(recptr))
            {
                Assert(!InRecovery);
                recptr = log_heap_visible(rel, heapBuf, vmBuf, cutoff_xid, flags);

                /*
                 * If data checksums are enabled (or wal_log_hints=on), we
                 * need to protect the heap page from being torn.
                 *
                 * If not, then we must *not* update the heap page's LSN. In
                 * this case, the FPI for the heap page was omitted from the
                 * WAL record inserted above, so it would be incorrect to
                 * update the heap page's LSN.
                 */
                if (XLogHintBitIsNeeded())
                {
                    Page        heapPage = BufferGetPage(heapBuf);

                    PageSetLSN(heapPage, recptr);
                }
            }
            PageSetLSN(page, recptr);
        }

        END_CRIT_SECTION();
    }

    LockBuffer(vmBuf, BUFFER_LOCK_UNLOCK);
    return status;
}

/*
 * Set VM (visibility map) flags in the VM block in vmBuf.
 *
 * This function is intended for callers that log VM changes together
 * with the heap page modifications that rendered the page all-visible.
 * Callers that log VM changes separately should use visibilitymap_set().
 *
 * vmBuf must be pinned and exclusively locked, and it must cover the VM bits
 * corresponding to heapBlk.
 *
 * In normal operation (not recovery), this must be called inside a critical
 * section that also applies the necessary heap page changes and, if
 * applicable, emits WAL.
 *
 * The caller is responsible for ensuring consistency between the heap page
 * and the VM page by holding a pin and exclusive lock on the buffer
 * containing heapBlk.
 *
 * rlocator is used only for debugging messages.
 */
uint8
visibilitymap_set_vmbits(BlockNumber heapBlk,
                         Buffer vmBuf, uint8 flags,
                         const RelFileLocator rlocator)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
    uint32      mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
    uint8       mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
    Page        page;
    uint8      *map;
    uint8       status;

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_set flags 0x%02X for %s %d",
         flags,
         relpathbackend(rlocator, MyProcNumber, MAIN_FORKNUM).str,
         heapBlk);
#endif

    /* Call in same critical section where WAL is emitted. */
    Assert(InRecovery || CritSectionCount > 0);

    /* Flags should be valid. Also never clear bits with this function */
    Assert((flags & VISIBILITYMAP_VALID_BITS) == flags);

    /* Must never set all_frozen bit without also setting all_visible bit */
    Assert(flags != VISIBILITYMAP_ALL_FROZEN);

    /* Check that we have the right VM page pinned */
    if (!BufferIsValid(vmBuf) || BufferGetBlockNumber(vmBuf) != mapBlock)
        elog(ERROR, "wrong VM buffer passed to visibilitymap_set");

    Assert(BufferIsLockedByMeInMode(vmBuf, BUFFER_LOCK_EXCLUSIVE));

    page = BufferGetPage(vmBuf);
    map = (uint8 *) PageGetContents(page);

    status = (map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS;
    if (flags != status)
    {
        map[mapByte] |= (flags << mapOffset);
        MarkBufferDirty(vmBuf);
    }

    return status;
}
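
/*
 * Illustrative sketch (not part of the original file) of the call pattern
 * visibilitymap_set_vmbits() expects, with the WAL details elided; the
 * single record emitted here is assumed to cover both the heap change and
 * the VM change:
 *
 *		START_CRIT_SECTION();
 *		... apply heap page changes, MarkBufferDirty(heapBuf) ...
 *		visibilitymap_set_vmbits(heapBlk, vmbuf,
 *								 VISIBILITYMAP_ALL_VISIBLE,
 *								 rel->rd_locator);
 *		... insert one WAL record registering both buffers, then
 *		... PageSetLSN() on both pages ...
 *		END_CRIT_SECTION();
 */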

/*
 * visibilitymap_get_status - get status of bits
 *
 * Are all tuples on heapBlk visible to all transactions, or all frozen,
 * according to the visibility map?
 *
 * On entry, *vmbuf should be InvalidBuffer or a valid buffer returned by an
 * earlier call to visibilitymap_pin or visibilitymap_get_status on the same
 * relation. On return, *vmbuf is a valid buffer with the map page containing
 * the bit for heapBlk, or InvalidBuffer. The caller is responsible for
 * releasing *vmbuf after it's done testing and setting bits.
 *
 * NOTE: This function is typically called without a lock on the heap page,
 * so somebody else could change the bit just after we look at it. In fact,
 * since we don't lock the visibility map page either, it's even possible that
 * someone else could have changed the bit just before we look at it, but yet
 * we might see the old value. It is the caller's responsibility to deal with
 * all concurrency issues!
 */
uint8
visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
{
    BlockNumber mapBlock = HEAPBLK_TO_MAPBLOCK(heapBlk);
    uint32      mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
    uint8       mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
    char       *map;
    uint8       result;

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_get_status %s %d", RelationGetRelationName(rel), heapBlk);
#endif

    /* Reuse the old pinned buffer if possible */
    if (BufferIsValid(*vmbuf))
    {
        if (BufferGetBlockNumber(*vmbuf) != mapBlock)
        {
            ReleaseBuffer(*vmbuf);
            *vmbuf = InvalidBuffer;
        }
    }

    if (!BufferIsValid(*vmbuf))
    {
        *vmbuf = vm_readbuf(rel, mapBlock, false);
        if (!BufferIsValid(*vmbuf))
            return (uint8) 0;
    }

    map = PageGetContents(BufferGetPage(*vmbuf));

    /*
     * A single byte read is atomic. There could be memory-ordering effects
     * here, but for performance reasons we make it the caller's job to worry
     * about that.
     */
    result = ((map[mapByte] >> mapOffset) & VISIBILITYMAP_VALID_BITS);
    return result;
}
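
/*
 * Illustrative sketch (not part of the original file): a typical reader,
 * such as an index-only scan deciding whether a heap fetch is needed, wraps
 * visibilitymap_get_status() like this (compare the VM_ALL_VISIBLE macro in
 * access/visibilitymap.h):
 *
 *		Buffer		vmbuf = InvalidBuffer;
 *		bool		all_visible;
 *
 *		all_visible = (visibilitymap_get_status(rel, heapBlk, &vmbuf) &
 *					   VISIBILITYMAP_ALL_VISIBLE) != 0;
 *		...
 *		if (BufferIsValid(vmbuf))
 *			ReleaseBuffer(vmbuf);		-- caller must release the pin
 */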

/*
 * visibilitymap_count - count number of bits set in visibility map
 *
 * Note: we ignore the possibility of race conditions when the table is being
 * extended concurrently with the call. New pages added to the table aren't
 * going to be marked all-visible or all-frozen, so they won't affect the
 * result.
 */
void
visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen)
{
    BlockNumber mapBlock;
    BlockNumber nvisible = 0;
    BlockNumber nfrozen = 0;

    /* all_visible must be specified */
    Assert(all_visible);

    for (mapBlock = 0;; mapBlock++)
    {
        Buffer      mapBuffer;
        uint64     *map;

        /*
         * Read till we fall off the end of the map. We assume that any extra
         * bytes in the last page are zeroed, so we don't bother excluding
         * them from the count.
         */
        mapBuffer = vm_readbuf(rel, mapBlock, false);
        if (!BufferIsValid(mapBuffer))
            break;

        /*
         * We choose not to lock the page, since the result is going to be
         * immediately stale anyway if anyone is concurrently setting or
         * clearing bits, and we only really need an approximate value.
         */
        map = (uint64 *) PageGetContents(BufferGetPage(mapBuffer));

        nvisible += pg_popcount_masked((const char *) map, MAPSIZE, VISIBLE_MASK8);
        if (all_frozen)
            nfrozen += pg_popcount_masked((const char *) map, MAPSIZE, FROZEN_MASK8);

        ReleaseBuffer(mapBuffer);
    }

    *all_visible = nvisible;
    if (all_frozen)
        *all_frozen = nfrozen;
}
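
/*
 * Worked example (illustrative, not part of the original file): consider a
 * map byte with value 0xC5 = 1100 0101, i.e. bit pairs (from the low end)
 * 01, 01, 00, 11. Then
 *
 *		0xC5 & VISIBLE_MASK8 = 1100 0101 & 0101 0101 = 0100 0101  (3 bits)
 *		0xC5 & FROZEN_MASK8  = 1100 0101 & 1010 1010 = 1000 0000  (1 bit)
 *
 * so pg_popcount_masked() counts 3 all-visible and 1 all-frozen heap pages
 * for this byte: three pairs have the visible (lower) bit set, and one pair
 * has the frozen (upper) bit set.
 */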

/*
 * visibilitymap_prepare_truncate -
 *		prepare for truncation of the visibility map
 *
 * nheapblocks is the new size of the heap.
 *
 * Return the number of blocks of new visibility map.
 * If it's InvalidBlockNumber, there is nothing to truncate;
 * otherwise the caller is responsible for calling smgrtruncate()
 * to truncate the visibility map pages.
 */
BlockNumber
visibilitymap_prepare_truncate(Relation rel, BlockNumber nheapblocks)
{
    BlockNumber newnblocks;

    /* last remaining block, byte, and bit */
    BlockNumber truncBlock = HEAPBLK_TO_MAPBLOCK(nheapblocks);
    uint32      truncByte = HEAPBLK_TO_MAPBYTE(nheapblocks);
    uint8       truncOffset = HEAPBLK_TO_OFFSET(nheapblocks);

#ifdef TRACE_VISIBILITYMAP
    elog(DEBUG1, "vm_truncate %s %d", RelationGetRelationName(rel), nheapblocks);
#endif

    /*
     * If no visibility map has been created yet for this relation, there's
     * nothing to truncate.
     */
    if (!smgrexists(RelationGetSmgr(rel), VISIBILITYMAP_FORKNUM))
        return InvalidBlockNumber;

    /*
     * Unless the new size is exactly at a visibility map page boundary, the
     * tail bits in the last remaining map page, representing truncated heap
     * blocks, need to be cleared. This is not only tidy, but also necessary
     * because we don't get a chance to clear the bits if the heap is extended
     * again.
     */
    if (truncByte != 0 || truncOffset != 0)
    {
        Buffer      mapBuffer;
        Page        page;
        char       *map;

        newnblocks = truncBlock + 1;

        mapBuffer = vm_readbuf(rel, truncBlock, false);
        if (!BufferIsValid(mapBuffer))
        {
            /* nothing to do, the file was already smaller */
            return InvalidBlockNumber;
        }

        page = BufferGetPage(mapBuffer);
        map = PageGetContents(page);

        LockBuffer(mapBuffer, BUFFER_LOCK_EXCLUSIVE);

        /* NO EREPORT(ERROR) from here till changes are logged */
        START_CRIT_SECTION();

        /* Clear out the unwanted bytes. */
        MemSet(&map[truncByte + 1], 0, MAPSIZE - (truncByte + 1));

        /*----
         * Mask out the unwanted bits of the last remaining byte.
         *
         * ((1 << 0) - 1) = 00000000
         * ((1 << 1) - 1) = 00000001
         * ...
         * ((1 << 6) - 1) = 00111111
         * ((1 << 7) - 1) = 01111111
         *----
         */
        map[truncByte] &= (1 << truncOffset) - 1;

        /*
         * Truncation of a relation is WAL-logged at a higher level, and we
         * will be called at WAL replay. But if checksums are enabled, we
         * still need to write a WAL record to protect against a torn page,
         * in case the page is flushed to disk before the truncation WAL
         * record. We cannot use MarkBufferDirtyHint here, because that will
         * not dirty the page during recovery.
         */
        MarkBufferDirty(mapBuffer);
        if (XLogHintBitIsNeeded())
            log_newpage_buffer(mapBuffer, false);

        END_CRIT_SECTION();

        UnlockReleaseBuffer(mapBuffer);
    }
    else
        newnblocks = truncBlock;

    if (smgrnblocks(RelationGetSmgr(rel), VISIBILITYMAP_FORKNUM) <= newnblocks)
    {
        /* nothing to do, the file was already smaller than requested size */
        return InvalidBlockNumber;
    }

    return newnblocks;
}

/*
 * Read a visibility map page.
 *
 * If the page doesn't exist, InvalidBuffer is returned, unless 'extend' is
 * true, in which case the visibility map file is extended.
 */
static Buffer
vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
{
    Buffer      buf;
    SMgrRelation reln;

    /*
     * Caution: re-using this smgr pointer could fail if the relcache entry
     * gets closed. It's safe as long as we only do smgr-level operations
     * between here and the last use of the pointer.
     */
    reln = RelationGetSmgr(rel);

    /*
     * If we haven't cached the size of the visibility map fork yet, check it
     * first.
     */
    if (reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] == InvalidBlockNumber)
    {
        if (smgrexists(reln, VISIBILITYMAP_FORKNUM))
            smgrnblocks(reln, VISIBILITYMAP_FORKNUM);
        else
            reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = 0;
    }

    /*
     * For reading we use ZERO_ON_ERROR mode, and initialize the page if
     * necessary. It's always safe to clear bits, so it's better to clear
     * corrupt pages than error out.
     *
     * We use the same path below to initialize pages when extending the
     * relation, as a concurrent extension can end up with vm_extend()
     * returning an already-initialized page.
     */
    if (blkno >= reln->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM])
    {
        if (extend)
            buf = vm_extend(rel, blkno + 1);
        else
            return InvalidBuffer;
    }
    else
        buf = ReadBufferExtended(rel, VISIBILITYMAP_FORKNUM, blkno,
                                 RBM_ZERO_ON_ERROR, NULL);

    /*
     * Initializing the page when needed is trickier than it looks, because of
     * the possibility of multiple backends doing this concurrently, and our
     * desire to not uselessly take the buffer lock in the normal path where
     * the page is OK. We must take the lock to initialize the page, so
     * recheck page newness after we have the lock, in case someone else
     * already did it. Also, because we initially check PageIsNew with no
     * lock, it's possible to fall through and return the buffer while someone
     * else is still initializing the page (i.e., we might see pd_upper as set
     * but other page header fields are still zeroes). This is harmless for
     * callers that will take a buffer lock themselves, but some callers
     * inspect the page without any lock at all. The latter is OK only so
     * long as it doesn't depend on the page header having correct contents.
     * Current usage is safe because PageGetContents() does not require that.
     */
    if (PageIsNew(BufferGetPage(buf)))
    {
        LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
        if (PageIsNew(BufferGetPage(buf)))
            PageInit(BufferGetPage(buf), BLCKSZ, 0);
        LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    }
    return buf;
}

/*
 * Ensure that the visibility map fork is at least vm_nblocks long, extending
 * it if necessary with zeroed pages.
 */
static Buffer
vm_extend(Relation rel, BlockNumber vm_nblocks)
{
    Buffer      buf;

    buf = ExtendBufferedRelTo(BMR_REL(rel), VISIBILITYMAP_FORKNUM, NULL,
                              EB_CREATE_FORK_IF_NEEDED |
                              EB_CLEAR_SIZE_CACHE,
                              vm_nblocks,
                              RBM_ZERO_ON_ERROR);

    /*
     * Send a shared-inval message to force other backends to close any smgr
     * references they may have for this rel, which we are about to change.
     * This is a useful optimization because it means that backends don't have
     * to keep checking for creation or extension of the file, which happens
     * infrequently.
     */
    CacheInvalidateSmgr(RelationGetSmgr(rel)->smgr_rlocator);

    return buf;
}