1/*-------------------------------------------------------------------------
2 *
3 * catcache.c
4 * System catalog cache for tuples matching a key.
5 *
6 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/utils/cache/catcache.c
12 *
13 *-------------------------------------------------------------------------
14 */
15#include "postgres.h"
16
17#include "access/genam.h"
18#include "access/heaptoast.h"
19#include "access/relscan.h"
20#include "access/table.h"
21#include "access/xact.h"
22#include "catalog/catalog.h"
23#include "catalog/pg_collation.h"
24#include "catalog/pg_type.h"
25#include "common/hashfn.h"
26#include "common/pg_prng.h"
27#include "miscadmin.h"
28#include "port/pg_bitutils.h"
29#ifdef CATCACHE_STATS
30#include "storage/ipc.h" /* for on_proc_exit */
31#endif
32#include "storage/lmgr.h"
33#include "utils/builtins.h"
34#include "utils/catcache.h"
35#include "utils/datum.h"
36#include "utils/fmgroids.h"
37#include "utils/injection_point.h"
38#include "utils/inval.h"
39#include "utils/memutils.h"
40#include "utils/rel.h"
41#include "utils/resowner.h"
42#include "utils/syscache.h"
43
44/*
45 * If a catcache invalidation is processed while we are in the middle of
46 * creating a catcache entry (or list), it might apply to the entry we're
47 * creating, making it invalid before it's been inserted into the catcache. To
48 * catch such cases, we have a stack of "create-in-progress" entries. Cache
49 * invalidation marks any matching entries in the stack as dead, in addition
50 * to the actual CatCTup and CatCList entries.
51 */
52typedef struct CatCInProgress
53{
54 CatCache *cache; /* cache that the entry belongs to */
55 uint32 hash_value; /* hash of the entry; ignored for lists */
56 bool list; /* is it a list entry? */
57 bool dead; /* set when the entry is invalidated */
58 struct CatCInProgress *next;
59} CatCInProgress;
60
61static CatCInProgress *catcache_in_progress_stack = NULL;
62
63 /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
64
65/*
66 * Given a hash value and the size of the hash table, find the bucket
67 * in which the hash value belongs. Since the hash table must contain
68 * a power-of-2 number of elements, this is a simple bitmask.
69 */
70#define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
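/*
 * Editor's illustration (not part of catcache.c): for a power-of-2 table
 * size, the bitmask above is equivalent to a modulo, e.g. with sz = 256:
 *
 *	uint32	h = 0xDEADBEEF;
 *
 *	HASH_INDEX(h, 256) == (Index) (h & 0xFF) == (Index) (h % 256)
 *
 * which is why the bucket count must be kept a power of two.
 */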
71
72
73/*
74 * variables, macros and other stuff
75 */
76
77#ifdef CACHEDEBUG
78#define CACHE_elog(...) elog(__VA_ARGS__)
79#else
80#define CACHE_elog(...)
81#endif
82
83/* Cache management header --- pointer is NULL until created */
84static CatCacheHeader *CacheHdr = NULL;
85
86static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
87 int nkeys,
88 Datum v1, Datum v2,
89 Datum v3, Datum v4);
90
91static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache,
92 int nkeys,
93 uint32 hashValue,
94 Index hashIndex,
95 Datum v1, Datum v2,
96 Datum v3, Datum v4);
97
98static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
99 Datum v1, Datum v2, Datum v3, Datum v4);
100static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys,
101 HeapTuple tuple);
102static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
103 const Datum *cachekeys,
104 const Datum *searchkeys);
105
106#ifdef CATCACHE_STATS
107static void CatCachePrintStats(int code, Datum arg);
108#endif
109static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
110static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
111static void RehashCatCache(CatCache *cp);
112static void RehashCatCacheLists(CatCache *cp);
113static void CatalogCacheInitializeCache(CatCache *cache);
114static CatCTup *CatalogCacheCreateEntry(CatCache *cache,
115 HeapTuple ntp, Datum *arguments,
116 uint32 hashValue, Index hashIndex);
117
118static void ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner);
119static void ReleaseCatCacheListWithOwner(CatCList *list, ResourceOwner resowner);
120static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, const int *attnos,
121 const Datum *keys);
122static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, const int *attnos,
123 const Datum *srckeys, Datum *dstkeys);
124
125
126/*
127 * internal support functions
128 */
129
130/* ResourceOwner callbacks to hold catcache references */
131
132static void ResOwnerReleaseCatCache(Datum res);
133static char *ResOwnerPrintCatCache(Datum res);
134static void ResOwnerReleaseCatCacheList(Datum res);
135static char *ResOwnerPrintCatCacheList(Datum res);
136
137static const ResourceOwnerDesc catcache_resowner_desc =
138{
139 /* catcache references */
140 .name = "catcache reference",
141 .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
142 .release_priority = RELEASE_PRIO_CATCACHE_REFS,
143 .ReleaseResource = ResOwnerReleaseCatCache,
144 .DebugPrint = ResOwnerPrintCatCache
145};
146
147static const ResourceOwnerDesc catlistref_resowner_desc =
148{
149 /* catcache-list pins */
150 .name = "catcache list reference",
151 .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
152 .release_priority = RELEASE_PRIO_CATCACHE_LIST_REFS,
153 .ReleaseResource = ResOwnerReleaseCatCacheList,
154 .DebugPrint = ResOwnerPrintCatCacheList
155};
156
157/* Convenience wrappers over ResourceOwnerRemember/Forget */
158static inline void
159ResourceOwnerRememberCatCacheRef(ResourceOwner owner, HeapTuple tuple)
160{
161 ResourceOwnerRemember(owner, PointerGetDatum(tuple), &catcache_resowner_desc);
162}
163static inline void
164ResourceOwnerForgetCatCacheRef(ResourceOwner owner, HeapTuple tuple)
165{
166 ResourceOwnerForget(owner, PointerGetDatum(tuple), &catcache_resowner_desc);
167}
168static inline void
169ResourceOwnerRememberCatCacheListRef(ResourceOwner owner, CatCList *list)
170{
171 ResourceOwnerRemember(owner, PointerGetDatum(list), &catlistref_resowner_desc);
172}
173static inline void
174ResourceOwnerForgetCatCacheListRef(ResourceOwner owner, CatCList *list)
175{
176 ResourceOwnerForget(owner, PointerGetDatum(list), &catlistref_resowner_desc);
177}
178
179
180/*
181 * Hash and equality functions for system types that are used as cache key
182 * fields. In some cases, we just call the regular SQL-callable functions for
183 * the appropriate data type, but that tends to be a little slow, and the
184 * speed of these functions is performance-critical. Therefore, for data
185 * types that frequently occur as catcache keys, we hard-code the logic here.
186 * Avoiding the overhead of DirectFunctionCallN(...) is a substantial win, and
187 * in certain cases (like int4) we can adopt a faster hash algorithm as well.
188 */
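/*
 * Editor's note (illustrative, not part of the sources): the generic route
 * for, say, an int4 key would go through the fmgr, roughly
 *
 *	hash = DatumGetUInt32(DirectFunctionCall1(hashint4, datum));
 *
 * The hardcoded helpers below call murmurhash32() and friends directly,
 * skipping the function-call-manager overhead on every cache lookup.
 */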
189
190static bool
191chareqfast(Datum a, Datum b)
192{
193 return DatumGetChar(a) == DatumGetChar(b);
194}
195
196static uint32
197charhashfast(Datum datum)
198{
199 return murmurhash32((int32) DatumGetChar(datum));
200}
201
202static bool
203nameeqfast(Datum a, Datum b)
204{
205 char *ca = NameStr(*DatumGetName(a));
206 char *cb = NameStr(*DatumGetName(b));
207
208 return strncmp(ca, cb, NAMEDATALEN) == 0;
209}
210
211static uint32
212namehashfast(Datum datum)
213{
214 char *key = NameStr(*DatumGetName(datum));
215
216 return hash_bytes((unsigned char *) key, strlen(key));
217}
218
219static bool
220int2eqfast(Datum a, Datum b)
221{
222 return DatumGetInt16(a) == DatumGetInt16(b);
223}
224
225static uint32
226int2hashfast(Datum datum)
227{
228 return murmurhash32((int32) DatumGetInt16(datum));
229}
230
231static bool
232int4eqfast(Datum a, Datum b)
233{
234 return DatumGetInt32(a) == DatumGetInt32(b);
235}
236
237static uint32
238int4hashfast(Datum datum)
239{
240 return murmurhash32((int32) DatumGetInt32(datum));
241}
242
243static bool
244texteqfast(Datum a, Datum b)
245{
246 /*
247 * The use of DEFAULT_COLLATION_OID is fairly arbitrary here. We just
248 * want to take the fast "deterministic" path in texteq().
249 */
250 return DatumGetBool(DirectFunctionCall2Coll(texteq, DEFAULT_COLLATION_OID, a, b));
251}
252
253static uint32
254texthashfast(Datum datum)
255{
256 /* analogously here as in texteqfast() */
257 return DatumGetInt32(DirectFunctionCall1Coll(hashtext, DEFAULT_COLLATION_OID, datum));
258}
259
260static bool
261oidvectoreqfast(Datum a, Datum b)
262{
263 return DatumGetBool(DirectFunctionCall2(oidvectoreq, a, b));
264}
265
266static uint32
267oidvectorhashfast(Datum datum)
268{
269 return DatumGetInt32(DirectFunctionCall1(hashoidvector, datum));
270}
271
272/* Lookup support functions for a type. */
273static void
274GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
275{
276 switch (keytype)
277 {
278 case BOOLOID:
279 *hashfunc = charhashfast;
280 *fasteqfunc = chareqfast;
281 *eqfunc = F_BOOLEQ;
282 break;
283 case CHAROID:
284 *hashfunc = charhashfast;
285 *fasteqfunc = chareqfast;
286 *eqfunc = F_CHAREQ;
287 break;
288 case NAMEOID:
289 *hashfunc = namehashfast;
290 *fasteqfunc = nameeqfast;
291 *eqfunc = F_NAMEEQ;
292 break;
293 case INT2OID:
294 *hashfunc = int2hashfast;
295 *fasteqfunc = int2eqfast;
296 *eqfunc = F_INT2EQ;
297 break;
298 case INT4OID:
299 *hashfunc = int4hashfast;
300 *fasteqfunc = int4eqfast;
301 *eqfunc = F_INT4EQ;
302 break;
303 case TEXTOID:
304 *hashfunc = texthashfast;
305 *fasteqfunc = texteqfast;
306 *eqfunc = F_TEXTEQ;
307 break;
308 case OIDOID:
309 case REGPROCOID:
310 case REGPROCEDUREOID:
311 case REGOPEROID:
312 case REGOPERATOROID:
313 case REGCLASSOID:
314 case REGTYPEOID:
315 case REGCOLLATIONOID:
316 case REGCONFIGOID:
317 case REGDICTIONARYOID:
318 case REGROLEOID:
319 case REGNAMESPACEOID:
320 case REGDATABASEOID:
321 *hashfunc = int4hashfast;
322 *fasteqfunc = int4eqfast;
323 *eqfunc = F_OIDEQ;
324 break;
325 case OIDVECTOROID:
326 *hashfunc = oidvectorhashfast;
327 *fasteqfunc = oidvectoreqfast;
328 *eqfunc = F_OIDVECTOREQ;
329 break;
330 default:
331 elog(FATAL, "type %u not supported as catcache key", keytype);
332 *hashfunc = NULL; /* keep compiler quiet */
333
334 *eqfunc = InvalidOid;
335 break;
336 }
337}
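/*
 * Editor's sketch (hypothetical, not part of catcache.c): a new catcache
 * key type would need its own case above. Supporting int8 keys might look
 * like this; int8hashfast/int8eqfast are invented names for this sketch:
 *
 *	case INT8OID:
 *		*hashfunc = int8hashfast;
 *		*fasteqfunc = int8eqfast;
 *		*eqfunc = F_INT8EQ;
 *		break;
 */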
338
339/*
340 * CatalogCacheComputeHashValue
341 *
342 * Compute the hash value associated with a given set of lookup keys
343 */
344static uint32
345CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
346 Datum v1, Datum v2, Datum v3, Datum v4)
347{
348 uint32 hashValue = 0;
349 uint32 oneHash;
350 CCHashFN *cc_hashfunc = cache->cc_hashfunc;
351
352 CACHE_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
353 cache->cc_relname, nkeys, cache);
354
355 switch (nkeys)
356 {
357 case 4:
358 oneHash = (cc_hashfunc[3]) (v4);
359 hashValue ^= pg_rotate_left32(oneHash, 24);
360 /* FALLTHROUGH */
361 case 3:
362 oneHash = (cc_hashfunc[2]) (v3);
363 hashValue ^= pg_rotate_left32(oneHash, 16);
364 /* FALLTHROUGH */
365 case 2:
366 oneHash = (cc_hashfunc[1]) (v2);
367 hashValue ^= pg_rotate_left32(oneHash, 8);
368 /* FALLTHROUGH */
369 case 1:
370 oneHash = (cc_hashfunc[0]) (v1);
371 hashValue ^= oneHash;
372 break;
373 default:
374 elog(FATAL, "wrong number of hash keys: %d", nkeys);
375 break;
376 }
377
378 return hashValue;
379}
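/*
 * Editor's illustration (not part of catcache.c): the per-key rotations
 * above make the combined hash sensitive to key order. For a two-key
 * cache the result is
 *
 *	hash(v1, v2) = h0(v1) ^ pg_rotate_left32(h1(v2), 8)
 *
 * so swapping v1 and v2 generally lands in a different bucket, unlike a
 * plain XOR of the two per-key hashes.
 */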
380
381/*
382 * CatalogCacheComputeTupleHashValue
383 *
384 * Compute the hash value associated with a given tuple to be cached
385 */
386static uint32
387CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple)
388{
389 Datum v1 = 0,
390 v2 = 0,
391 v3 = 0,
392 v4 = 0;
393 bool isNull = false;
394 int *cc_keyno = cache->cc_keyno;
395 TupleDesc cc_tupdesc = cache->cc_tupdesc;
396
397 /* Now extract key fields from tuple, insert into scankey */
398 switch (nkeys)
399 {
400 case 4:
401 v4 = fastgetattr(tuple,
402 cc_keyno[3],
403 cc_tupdesc,
404 &isNull);
405 Assert(!isNull);
406 /* FALLTHROUGH */
407 case 3:
408 v3 = fastgetattr(tuple,
409 cc_keyno[2],
410 cc_tupdesc,
411 &isNull);
412 Assert(!isNull);
413 /* FALLTHROUGH */
414 case 2:
415 v2 = fastgetattr(tuple,
416 cc_keyno[1],
417 cc_tupdesc,
418 &isNull);
419 Assert(!isNull);
420 /* FALLTHROUGH */
421 case 1:
422 v1 = fastgetattr(tuple,
423 cc_keyno[0],
424 cc_tupdesc,
425 &isNull);
426 Assert(!isNull);
427 break;
428 default:
429 elog(FATAL, "wrong number of hash keys: %d", nkeys);
430 break;
431 }
432
433 return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
434}
435
436/*
437 * CatalogCacheCompareTuple
438 *
439 * Compare a tuple to the passed arguments.
440 */
441static inline bool
442CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
443 const Datum *cachekeys,
444 const Datum *searchkeys)
445{
446 const CCFastEqualFN *cc_fastequal = cache->cc_fastequal;
447 int i;
448
449 for (i = 0; i < nkeys; i++)
450 {
451 if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i]))
452 return false;
453 }
454 return true;
455}
456
457
458#ifdef CATCACHE_STATS
459
460static void
461CatCachePrintStats(int code, Datum arg)
462{
463 slist_iter iter;
464 uint64 cc_searches = 0;
465 uint64 cc_hits = 0;
466 uint64 cc_neg_hits = 0;
467 uint64 cc_newloads = 0;
468 uint64 cc_invals = 0;
469 uint64 cc_nlists = 0;
470 uint64 cc_lsearches = 0;
471 uint64 cc_lhits = 0;
472
473 slist_foreach(iter, &CacheHdr->ch_caches)
474 {
475 CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
476
477 if (cache->cc_ntup == 0 && cache->cc_searches == 0)
478 continue; /* don't print unused caches */
479 elog(DEBUG2, "catcache %s/%u: %d tup, %" PRIu64 " srch, %" PRIu64 "+%"
480 PRIu64 "=%" PRIu64 " hits, %" PRIu64 "+%" PRIu64 "=%"
481 PRIu64 " loads, %" PRIu64 " invals, %d lists, %" PRIu64
482 " lsrch, %" PRIu64 " lhits",
483 cache->cc_relname,
484 cache->cc_indexoid,
485 cache->cc_ntup,
486 cache->cc_searches,
487 cache->cc_hits,
488 cache->cc_neg_hits,
489 cache->cc_hits + cache->cc_neg_hits,
490 cache->cc_newloads,
491 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
492 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
493 cache->cc_invals,
494 cache->cc_nlist,
495 cache->cc_lsearches,
496 cache->cc_lhits);
497 cc_searches += cache->cc_searches;
498 cc_hits += cache->cc_hits;
499 cc_neg_hits += cache->cc_neg_hits;
500 cc_newloads += cache->cc_newloads;
501 cc_invals += cache->cc_invals;
502 cc_nlists += cache->cc_nlist;
503 cc_lsearches += cache->cc_lsearches;
504 cc_lhits += cache->cc_lhits;
505 }
506 elog(DEBUG2, "catcache totals: %d tup, %" PRIu64 " srch, %" PRIu64 "+%"
507 PRIu64 "=%" PRIu64 " hits, %" PRIu64 "+%" PRIu64 "=%" PRIu64
508 " loads, %" PRIu64 " invals, %" PRIu64 " lists, %" PRIu64
509 " lsrch, %" PRIu64 " lhits",
510 CacheHdr->ch_ntup,
511 cc_searches,
512 cc_hits,
513 cc_neg_hits,
514 cc_hits + cc_neg_hits,
515 cc_newloads,
516 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
517 cc_searches - cc_hits - cc_neg_hits,
518 cc_invals,
519 cc_nlists,
520 cc_lsearches,
521 cc_lhits);
522}
523#endif /* CATCACHE_STATS */
524
525
526/*
527 * CatCacheRemoveCTup
528 *
529 * Unlink and delete the given cache entry
530 *
531 * NB: if it is a member of a CatCList, the CatCList is deleted too.
532 * Both the cache entry and the list had better have zero refcount.
533 */
534static void
535CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
536{
537 Assert(ct->refcount == 0);
538 Assert(ct->my_cache == cache);
539
540 if (ct->c_list)
541 {
542 /*
543 * The cleanest way to handle this is to call CatCacheRemoveCList,
544 * which will recurse back to me, and the recursive call will do the
545 * work. Set the "dead" flag to make sure it does recurse.
546 */
547 ct->dead = true;
548 CatCacheRemoveCList(cache, ct->c_list);
549 return; /* nothing left to do */
550 }
551
552 /* delink from linked list */
553 dlist_delete(&ct->cache_elem);
554
555 /*
556 * Free the keys if we're dealing with a negative entry; a normal entry's
557 * keys just point into its tuple, which was allocated together with the CatCTup.
558 */
559 if (ct->negative)
560 CatCacheFreeKeys(cache->cc_tupdesc, cache->cc_nkeys,
561 cache->cc_keyno, ct->keys);
562
563 pfree(ct);
564
565 --cache->cc_ntup;
566 --CacheHdr->ch_ntup;
567}
568
569/*
570 * CatCacheRemoveCList
571 *
572 * Unlink and delete the given cache list entry
573 *
574 * NB: any dead member entries that become unreferenced are deleted too.
575 */
576static void
577CatCacheRemoveCList(CatCache *cache, CatCList *cl)
578{
579 int i;
580
581 Assert(cl->refcount == 0);
582 Assert(cl->my_cache == cache);
583
584 /* delink from member tuples */
585 for (i = cl->n_members; --i >= 0;)
586 {
587 CatCTup *ct = cl->members[i];
588
589 Assert(ct->c_list == cl);
590 ct->c_list = NULL;
591 /* if the member is dead and now has no references, remove it */
592 if (
593#ifndef CATCACHE_FORCE_RELEASE
594 ct->dead &&
595#endif
596 ct->refcount == 0)
597 CatCacheRemoveCTup(cache, ct);
598 }
599
600 /* delink from linked list */
601 dlist_delete(&cl->cache_elem);
602
603 /* free associated column data */
604 CatCacheFreeKeys(cache->cc_tupdesc, cl->nkeys,
605 cache->cc_keyno, cl->keys);
606
607 pfree(cl);
608
609 --cache->cc_nlist;
610}
611
612
613/*
614 * CatCacheInvalidate
615 *
616 * Invalidate entries in the specified cache, given a hash value.
617 *
618 * We delete cache entries that match the hash value, whether positive
619 * or negative. We don't care whether the invalidation is the result
620 * of a tuple insertion or a deletion.
621 *
622 * We used to try to match positive cache entries by TID, but that is
623 * unsafe after a VACUUM FULL on a system catalog: an inval event could
624 * be queued before VACUUM FULL, and then processed afterwards, when the
625 * target tuple that has to be invalidated has a different TID than it
626 * did when the event was created. So now we just compare hash values and
627 * accept the small risk of unnecessary invalidations due to false matches.
628 *
629 * This routine is only quasi-public: it should only be used by inval.c.
630 */
631void
632CatCacheInvalidate(CatCache *cache, uint32 hashValue)
633{
634 Index hashIndex;
635 dlist_mutable_iter iter;
636
637 CACHE_elog(DEBUG2, "CatCacheInvalidate: called");
638
639 /*
640 * We don't bother to check whether the cache has finished initialization
641 * yet; if not, there will be no entries in it so no problem.
642 */
643
644 /*
645 * Invalidate *all* CatCLists in this cache; it's too hard to tell which
646 * searches might still be correct, so just zap 'em all.
647 */
648 for (int i = 0; i < cache->cc_nlbuckets; i++)
649 {
650 dlist_head *bucket = &cache->cc_lbucket[i];
651
652 dlist_foreach_modify(iter, bucket)
653 {
654 CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
655
656 if (cl->refcount > 0)
657 cl->dead = true;
658 else
659 CatCacheRemoveCList(cache, cl);
660 }
661 }
662
663 /*
664 * inspect the proper hash bucket for tuple matches
665 */
666 hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
667 dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
668 {
669 CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
670
671 if (hashValue == ct->hash_value)
672 {
673 if (ct->refcount > 0 ||
674 (ct->c_list && ct->c_list->refcount > 0))
675 {
676 ct->dead = true;
677 /* list, if any, was marked dead above */
678 Assert(ct->c_list == NULL || ct->c_list->dead);
679 }
680 else
681 CatCacheRemoveCTup(cache, ct);
682 CACHE_elog(DEBUG2, "CatCacheInvalidate: invalidated");
683#ifdef CATCACHE_STATS
684 cache->cc_invals++;
685#endif
686 /* could be multiple matches, so keep looking! */
687 }
688 }
689
690 /* Also invalidate any entries that are being built */
691 for (CatCInProgress *e = catcache_in_progress_stack; e != NULL; e = e->next)
692 {
693 if (e->cache == cache)
694 {
695 if (e->list || e->hash_value == hashValue)
696 e->dead = true;
697 }
698 }
699}
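/*
 * Editor's sketch (hedged; see inval.c for the real logic): an invalidation
 * message carries only a cache id and the hash value, so delivery to this
 * routine looks roughly like
 *
 *	if (msg->cc.id == cache->id)
 *		CatCacheInvalidate(cache, msg->cc.hashValue);
 *
 * No tuple TID is needed, for the VACUUM FULL reason given above.
 */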
700
701/* ----------------------------------------------------------------
702 * public functions
703 * ----------------------------------------------------------------
704 */
705
706
707/*
708 * Standard routine for creating cache context if it doesn't exist yet
709 *
710 * There are a lot of places (probably far more than necessary) that check
711 * whether CacheMemoryContext exists yet and want to create it if not.
712 * We centralize knowledge of exactly how to create it here.
713 */
714void
715CreateCacheMemoryContext(void)
716{
717 /*
718 * Purely for paranoia, check that context doesn't exist; caller probably
719 * did so already.
720 */
721 if (!CacheMemoryContext)
722 CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
723 "CacheMemoryContext",
724 ALLOCSET_DEFAULT_SIZES);
725}
726
727
728/*
729 * ResetCatalogCache
730 *
731 * Reset one catalog cache to empty.
732 *
733 * This is not very efficient if the target cache is nearly empty.
734 * However, it shouldn't need to be efficient; we don't invoke it often.
735 *
736 * If 'debug_discard' is true, we are being called as part of
737 * debug_discard_caches. In that case, the cache is not reset for
738 * correctness, but just to get more testing of cache invalidation. We skip
739 * resetting in-progress build entries in that case, or we'd never make any
740 * progress.
741 */
742static void
743ResetCatalogCache(CatCache *cache, bool debug_discard)
744{
745 dlist_mutable_iter iter;
746 int i;
747
748 /* Remove each list in this cache, or at least mark it dead */
749 for (i = 0; i < cache->cc_nlbuckets; i++)
750 {
751 dlist_head *bucket = &cache->cc_lbucket[i];
752
753 dlist_foreach_modify(iter, bucket)
754 {
755 CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
756
757 if (cl->refcount > 0)
758 cl->dead = true;
759 else
760 CatCacheRemoveCList(cache, cl);
761 }
762 }
763
764 /* Remove each tuple in this cache, or at least mark it dead */
765 for (i = 0; i < cache->cc_nbuckets; i++)
766 {
767 dlist_head *bucket = &cache->cc_bucket[i];
768
769 dlist_foreach_modify(iter, bucket)
770 {
771 CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
772
773 if (ct->refcount > 0 ||
774 (ct->c_list && ct->c_list->refcount > 0))
775 {
776 ct->dead = true;
777 /* list, if any, was marked dead above */
778 Assert(ct->c_list == NULL || ct->c_list->dead);
779 }
780 else
781 CatCacheRemoveCTup(cache, ct);
782#ifdef CATCACHE_STATS
783 cache->cc_invals++;
784#endif
785 }
786 }
787
788 /* Also invalidate any entries that are being built */
789 if (!debug_discard)
790 {
791 for (CatCInProgress *e = catcache_in_progress_stack; e != NULL; e = e->next)
792 {
793 if (e->cache == cache)
794 e->dead = true;
795 }
796 }
797}
798
799/*
800 * ResetCatalogCaches
801 *
802 * Reset all caches when a shared cache inval event forces it
803 */
804void
805ResetCatalogCaches(void)
806{
807 ResetCatalogCachesExt(false);
808}
809
810void
811ResetCatalogCachesExt(bool debug_discard)
812{
813 slist_iter iter;
814
815 CACHE_elog(DEBUG2, "ResetCatalogCaches called");
816
817 slist_foreach(iter, &CacheHdr->ch_caches)
818 {
819 CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
820
821 ResetCatalogCache(cache, debug_discard);
822 }
823
824 CACHE_elog(DEBUG2, "end of ResetCatalogCaches call");
825}
826
827/*
828 * CatalogCacheFlushCatalog
829 *
830 * Flush all catcache entries that came from the specified system catalog.
831 * This is needed after VACUUM FULL/CLUSTER on the catalog, since the
832 * tuples very likely now have different TIDs than before. (At one point
833 * we also tried to force re-execution of CatalogCacheInitializeCache for
834 * the cache(s) on that catalog. This is a bad idea since it leads to all
835 * kinds of trouble if a cache flush occurs while loading cache entries.
836 * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
837 * rather than relying on the relcache to keep a tupdesc for us. Of course
838 * this assumes the tupdesc of a cacheable system table will not change...)
839 */
840void
841CatalogCacheFlushCatalog(Oid catId)
842{
843 slist_iter iter;
844
845 CACHE_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
846
847 slist_foreach(iter, &CacheHdr->ch_caches)
848 {
849 CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
850
851 /* Does this cache store tuples of the target catalog? */
852 if (cache->cc_reloid == catId)
853 {
854 /* Yes, so flush all its contents */
855 ResetCatalogCache(cache, false);
856
857 /* Tell inval.c to call syscache callbacks for this cache */
858 CallSyscacheCallbacks(cache->id, 0);
859 }
860 }
861
862 CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
863}
864
865/*
866 * InitCatCache
867 *
868 * This allocates and initializes a cache for a system catalog relation.
869 * Actually, the cache is only partially initialized to avoid opening the
870 * relation. The relation will be opened and the rest of the cache
871 * structure initialized on the first access.
872 */
873#ifdef CACHEDEBUG
874#define InitCatCache_DEBUG2 \
875do { \
876 elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
877 cp->cc_reloid, cp->cc_indexoid, cp->id, \
878 cp->cc_nkeys, cp->cc_nbuckets); \
879} while(0)
880#else
881#define InitCatCache_DEBUG2
882#endif
883
884CatCache *
885InitCatCache(int id,
886 Oid reloid,
887 Oid indexoid,
888 int nkeys,
889 const int *key,
890 int nbuckets)
891{
892 CatCache *cp;
893 MemoryContext oldcxt;
894 int i;
895
896 /*
897 * nbuckets is the initial number of hash buckets to use in this catcache.
898 * It will be enlarged later if it becomes too full.
899 *
900 * nbuckets must be a power of two. We check this via Assert rather than
901 * a full runtime check because the values will be coming from constant
902 * tables.
903 *
904 * If you're confused by the power-of-two check, see comments in
905 * bitmapset.c for an explanation.
906 */
907 Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
908
909 /*
910 * first switch to the cache context so our allocations do not vanish at
911 * the end of a transaction
912 */
913 if (!CacheMemoryContext)
914 CreateCacheMemoryContext();
915
916 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
917
918 /*
919 * if first time through, initialize the cache group header
920 */
921 if (CacheHdr == NULL)
922 {
923 CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
924 slist_init(&CacheHdr->ch_caches);
925 CacheHdr->ch_ntup = 0;
926#ifdef CATCACHE_STATS
927 /* set up to dump stats at backend exit */
928 on_proc_exit(CatCachePrintStats, 0);
929#endif
930 }
931
932 /*
933 * Allocate a new cache structure, aligning to a cacheline boundary
934 *
935 * Note: we rely on zeroing to initialize all the dlist headers correctly
936 */
937 cp = (CatCache *) palloc_aligned(sizeof(CatCache), PG_CACHE_LINE_SIZE,
938 MCXT_ALLOC_ZERO);
939 cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));
940
941 /*
942 * Many catcaches never receive any list searches. Therefore, we don't
943 * allocate the cc_lbuckets till we get a list search.
944 */
945 cp->cc_lbucket = NULL;
946
947 /*
948 * initialize the cache's relation information for the relation
949 * corresponding to this cache, and initialize some of the new cache's
950 * other internal fields. But don't open the relation yet.
951 */
952 cp->id = id;
953 cp->cc_relname = "(not known yet)";
954 cp->cc_reloid = reloid;
955 cp->cc_indexoid = indexoid;
956 cp->cc_relisshared = false; /* temporary */
957 cp->cc_tupdesc = (TupleDesc) NULL;
958 cp->cc_ntup = 0;
959 cp->cc_nlist = 0;
960 cp->cc_nbuckets = nbuckets;
961 cp->cc_nlbuckets = 0;
962 cp->cc_nkeys = nkeys;
963 for (i = 0; i < nkeys; ++i)
964 {
965 Assert(AttributeNumberIsValid(key[i]));
966 cp->cc_keyno[i] = key[i];
967 }
968
969 /*
970 * new cache is initialized as far as we can go for now. print some
971 * debugging information, if appropriate.
972 */
973 InitCatCache_DEBUG2;
974
975 /*
976 * add completed cache to top of group header's list
977 */
978 slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);
979
980 /*
981 * back to the old context before we return...
982 */
983 MemoryContextSwitchTo(oldcxt);
984
985 return cp;
986}
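/*
 * Editor's sketch (hedged): syscache.c creates each of the system caches
 * roughly like
 *
 *	SysCache[id] = InitCatCache(id, reloid, indexoid, nkeys, key, nbuckets);
 *
 * with reloid/indexoid/key/nbuckets drawn from its constant tables; the
 * nbuckets values must be powers of two, per the Assert above.
 */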
987
988/*
989 * Enlarge a catcache, doubling the number of buckets.
990 */
991static void
992RehashCatCache(CatCache *cp)
993{
994 dlist_head *newbucket;
995 int newnbuckets;
996 int i;
997
998 elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
999 cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);
1000
1001 /* Allocate a new, larger, hash table. */
1002 newnbuckets = cp->cc_nbuckets * 2;
1003 newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
1004
1005 /* Move all entries from old hash table to new. */
1006 for (i = 0; i < cp->cc_nbuckets; i++)
1007 {
1008 dlist_mutable_iter iter;
1009
1010 dlist_foreach_modify(iter, &cp->cc_bucket[i])
1011 {
1012 CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
1013 int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
1014
1015 dlist_delete(iter.cur);
1016 dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
1017 }
1018 }
1019
1020 /* Switch to the new array. */
1021 pfree(cp->cc_bucket);
1022 cp->cc_nbuckets = newnbuckets;
1023 cp->cc_bucket = newbucket;
1024}
1025
1026/*
1027 * Enlarge a catcache's list storage, doubling the number of buckets.
1028 */
1029static void
1030RehashCatCacheLists(CatCache *cp)
1031{
1032 dlist_head *newbucket;
1033 int newnbuckets;
1034 int i;
1035
1036 elog(DEBUG1, "rehashing catalog cache id %d for %s; %d lists, %d buckets",
1037 cp->id, cp->cc_relname, cp->cc_nlist, cp->cc_nlbuckets);
1038
1039 /* Allocate a new, larger, hash table. */
1040 newnbuckets = cp->cc_nlbuckets * 2;
1041 newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
1042
1043 /* Move all entries from old hash table to new. */
1044 for (i = 0; i < cp->cc_nlbuckets; i++)
1045 {
1046 dlist_mutable_iter iter;
1047
1048 dlist_foreach_modify(iter, &cp->cc_lbucket[i])
1049 {
1050 CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
1051 int hashIndex = HASH_INDEX(cl->hash_value, newnbuckets);
1052
1053 dlist_delete(iter.cur);
1054 dlist_push_head(&newbucket[hashIndex], &cl->cache_elem);
1055 }
1056 }
1057
1058 /* Switch to the new array. */
1059 pfree(cp->cc_lbucket);
1060 cp->cc_nlbuckets = newnbuckets;
1061 cp->cc_lbucket = newbucket;
1062}
1063
1064/*
1065 * ConditionalCatalogCacheInitializeCache
1066 *
1067 * Call CatalogCacheInitializeCache() if not yet done.
1068 */
1070static void
1071ConditionalCatalogCacheInitializeCache(CatCache *cache)
1072{
1073#ifdef USE_ASSERT_CHECKING
1074 /*
1075 * TypeCacheRelCallback() runs outside transactions and relies on TYPEOID
1076 * for hashing. This isn't ideal. Since lookup_type_cache() both
1077 * registers the callback and searches TYPEOID, reaching trouble likely
1078 * requires OOM at an unlucky moment.
1079 *
1080 * InvalidateAttoptCacheCallback() runs outside transactions and likewise
1081 * relies on ATTNUM. InitPostgres() initializes ATTNUM, so it's reliable.
1082 */
1083 if (!(cache->id == TYPEOID || cache->id == ATTNUM) ||
1084 IsTransactionState())
1085 AssertCouldGetRelation();
1086 else
1087 Assert(cache->cc_tupdesc != NULL);
1088#endif
1089
1090 if (unlikely(cache->cc_tupdesc == NULL))
1091 CatalogCacheInitializeCache(cache);
1092}
1093
1094/*
1095 * CatalogCacheInitializeCache
1096 *
1097 * This function does final initialization of a catcache: obtain the tuple
1098 * descriptor and set up the hash and equality function links.
1099 */
1100#ifdef CACHEDEBUG
1101#define CatalogCacheInitializeCache_DEBUG1 \
1102 elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
1103 cache->cc_reloid)
1104
1105#define CatalogCacheInitializeCache_DEBUG2 \
1106do { \
1107 if (cache->cc_keyno[i] > 0) { \
1108 elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
1109 i+1, cache->cc_nkeys, cache->cc_keyno[i], \
1110 TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \
1111 } else { \
1112 elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
1113 i+1, cache->cc_nkeys, cache->cc_keyno[i]); \
1114 } \
1115} while(0)
1116#else
1117#define CatalogCacheInitializeCache_DEBUG1
1118#define CatalogCacheInitializeCache_DEBUG2
1119#endif
1120
1121static void
1122CatalogCacheInitializeCache(CatCache *cache)
1123{
1124 Relation relation;
1125 MemoryContext oldcxt;
1126 TupleDesc tupdesc;
1127 int i;
1128
1129 CatalogCacheInitializeCache_DEBUG1;
1130
1131 relation = table_open(cache->cc_reloid, AccessShareLock);
1132
1133 /*
1134 * switch to the cache context so our allocations do not vanish at the end
1135 * of a transaction
1136 */
1137 Assert(CacheMemoryContext != NULL);
1138
1139 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1140
1141 /*
1142 * copy the relcache's tuple descriptor to permanent cache storage
1143 */
1144 tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
1145
1146 /*
1147 * save the relation's name and relisshared flag, too (cc_relname is used
1148 * only for debugging purposes)
1149 */
1150 cache->cc_relname = pstrdup(RelationGetRelationName(relation));
1151 cache->cc_relisshared = RelationGetForm(relation)->relisshared;
1152
1153 /*
1154 * return to the caller's memory context and close the rel
1155 */
1156 MemoryContextSwitchTo(oldcxt);
1157
1158 table_close(relation, AccessShareLock);
1159
1160 CACHE_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
1161 cache->cc_relname, cache->cc_nkeys);
1162
1163 /*
1164 * initialize cache's key information
1165 */
1166 for (i = 0; i < cache->cc_nkeys; ++i)
1167 {
1168 Oid keytype;
1169 RegProcedure eqfunc;
1170
1171 CatalogCacheInitializeCache_DEBUG2;
1172
1173 if (cache->cc_keyno[i] > 0)
1174 {
1175 Form_pg_attribute attr = TupleDescAttr(tupdesc,
1176 cache->cc_keyno[i] - 1);
1177
1178 keytype = attr->atttypid;
1179 /* cache key columns should always be NOT NULL */
1180 Assert(attr->attnotnull);
1181 }
1182 else
1183 {
1184 if (cache->cc_keyno[i] < 0)
1185 elog(FATAL, "sys attributes are not supported in caches");
1186 keytype = OIDOID;
1187 }
1188
1189 GetCCHashEqFuncs(keytype,
1190 &cache->cc_hashfunc[i],
1191 &eqfunc,
1192 &cache->cc_fastequal[i]);
1193
1194 /*
1195 * Do equality-function lookup (we assume this won't need a catalog
1196 * lookup for any supported type)
1197 */
1198 fmgr_info_cxt(eqfunc,
1199 &cache->cc_skey[i].sk_func,
1200 CacheMemoryContext);
1201
1202 /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
1203 cache->cc_skey[i].sk_attno = cache->cc_keyno[i];
1204
1205 /* Fill in sk_strategy as well --- always standard equality */
1206 cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
1207 cache->cc_skey[i].sk_subtype = InvalidOid;
1208 /* If a catcache key requires a collation, it must be C collation */
1209 cache->cc_skey[i].sk_collation = C_COLLATION_OID;
1210
1211 CACHE_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
1212 cache->cc_relname, i, cache);
1213 }
1214
1215 /*
1216 * mark this cache fully initialized
1217 */
1218 cache->cc_tupdesc = tupdesc;
1219}
1220
1221/*
1222 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
1223 *
1224 * One reason to call this routine is to ensure that the relcache has
1225 * created entries for all the catalogs and indexes referenced by catcaches.
1226 * Therefore, provide an option to open the index as well as fixing the
1227 * cache itself. An exception is the indexes on pg_am, which we don't use
1228 * (cf. IndexScanOK).
1229 */
1230void
1231InitCatCachePhase2(CatCache *cache, bool touch_index)
1232{
1233 ConditionalCatalogCacheInitializeCache(cache);
1234
1235 if (touch_index &&
1236 cache->id != AMOID &&
1237 cache->id != AMNAME)
1238 {
1239 Relation idesc;
1240
1241 /*
1242 * We must lock the underlying catalog before opening the index to
1243 * avoid deadlock, since index_open could possibly result in reading
1244 * this same catalog, and if anyone else is exclusive-locking this
1245 * catalog and index they'll be doing it in that order.
1246 */
1247 LockRelationOid(cache->cc_reloid, AccessShareLock);
1248 idesc = index_open(cache->cc_indexoid, AccessShareLock);
1249
1250 /*
1251 * While we've got the index open, let's check that it's unique (and
1252 * not just deferrable-unique, thank you very much). This is just to
1253 * catch thinkos in definitions of new catcaches, so we don't worry
1254 * about the pg_am indexes not getting tested.
1255 */
1256 Assert(idesc->rd_index->indisunique &&
1257 idesc->rd_index->indimmediate);
1258
1259 index_close(idesc, AccessShareLock);
1260 UnlockRelationOid(cache->cc_reloid, AccessShareLock);
1261 }
1262}
1263
1264
1265/*
1266 * IndexScanOK
1267 *
1268 * This function checks for tuples that will be fetched by
1269 * IndexSupportInitialize() during relcache initialization for
1270 * certain system indexes that support critical syscaches.
1271 * We can't use an indexscan to fetch these, else we'll get into
1272 * infinite recursion. A plain heap scan will work, however.
1273 * Once we have completed relcache initialization (signaled by
1274 * criticalRelcachesBuilt), we don't have to worry anymore.
1275 *
1276 * Similarly, during backend startup we have to be able to use the
1277 * pg_authid, pg_auth_members and pg_database syscaches for
1278 * authentication even if we don't yet have relcache entries for those
1279 * catalogs' indexes.
1280 */
1281static bool
1283{
1284 switch (cache->id)
1285 {
1286 case INDEXRELID:
1287
1288 /*
1289 * Rather than tracking exactly which indexes have to be loaded
1290 * before we can use indexscans (which changes from time to time),
1291 * just force all pg_index searches to be heap scans until we've
1292 * built the critical relcaches.
1293 */
1294 if (!criticalRelcachesBuilt)
1295 return false;
1296 break;
1297
1298 case AMOID:
1299 case AMNAME:
1300
1301 /*
1302 * Always do heap scans in pg_am, because it's so small there's
1303 * not much point in an indexscan anyway. We *must* do this when
1304 * initially building critical relcache entries, but we might as
1305 * well just always do it.
1306 */
1307 return false;
1308
1309 case AUTHNAME:
1310 case AUTHOID:
1311 case AUTHMEMMEMROLE:
1312 case DATABASEOID:
1313
1314 /*
1315 * Protect authentication lookups occurring before relcache has
1316 * collected entries for shared indexes.
1317 */
1318 if (!criticalSharedRelcachesBuilt)
1319 return false;
1320 break;
1321
1322 default:
1323 break;
1324 }
1325
1326 /* Normal case, allow index scan */
1327 return true;
1328}
1329
1330/*
1331 * SearchCatCache
1332 *
1333 * This call searches a system cache for a tuple, opening the relation
1334 * if necessary (on the first access to a particular cache).
1335 *
1336 * The result is NULL if not found, or a pointer to a HeapTuple in
1337 * the cache. The caller must not modify the tuple, and must call
1338 * ReleaseCatCache() when done with it.
1339 *
1340 * The search key values should be expressed as Datums of the key columns'
1341 * datatype(s). (Pass zeroes for any unused parameters.) As a special
1342 * exception, the passed-in key for a NAME column can be just a C string;
1343 * the caller need not go to the trouble of converting it to a fully
1344 * null-padded NAME.
1345 */
1346HeapTuple
1347SearchCatCache(CatCache *cache,
1348 Datum v1,
1349 Datum v2,
1350 Datum v3,
1351 Datum v4)
1352{
1353 return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4);
1354}
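/*
 * Editor's example (illustrative): the canonical caller pattern, here via
 * the syscache wrappers, pairs every successful search with a release:
 *
 *	HeapTuple	tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeoid));
 *
 *	if (HeapTupleIsValid(tup))
 *	{
 *		Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tup);
 *
 *		... use typtup, but do not modify it ...
 *		ReleaseSysCache(tup);
 *	}
 */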
1355
1356
1357/*
1358 * The SearchCatCacheN() variants are SearchCatCache() specialized for a
1359 * given number of arguments. The compiler can inline their bodies and unroll
1360 * loops, making them a bit faster than SearchCatCache().
1361 */
1362
1363HeapTuple
1364SearchCatCache1(CatCache *cache,
1365 Datum v1)
1366{
1367 return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0);
1368}
1369
1370
1371HeapTuple
1372SearchCatCache2(CatCache *cache,
1373 Datum v1, Datum v2)
1374{
1375 return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0);
1376}
1377
1378
1379HeapTuple
1380SearchCatCache3(CatCache *cache,
1381 Datum v1, Datum v2, Datum v3)
1382{
1383 return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0);
1384}
1385
1386
1387HeapTuple
1388SearchCatCache4(CatCache *cache,
1389 Datum v1, Datum v2, Datum v3, Datum v4)
1390{
1391 return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
1392}
1393
1394/*
1395 * Work-horse for SearchCatCache/SearchCatCacheN.
1396 */
1397static inline HeapTuple
1398SearchCatCacheInternal(CatCache *cache,
1399 int nkeys,
1400 Datum v1,
1401 Datum v2,
1402 Datum v3,
1403 Datum v4)
1404{
1405 Datum arguments[CATCACHE_MAXKEYS];
1406 uint32 hashValue;
1407 Index hashIndex;
1408 dlist_iter iter;
1409 dlist_head *bucket;
1410 CatCTup *ct;
1411
1412 Assert(cache->cc_nkeys == nkeys);
1413
1414 /*
1415 * one-time startup overhead for each cache
1416 */
1417 ConditionalCatalogCacheInitializeCache(cache);
1418
1419#ifdef CATCACHE_STATS
1420 cache->cc_searches++;
1421#endif
1422
1423 /* Initialize local parameter array */
1424 arguments[0] = v1;
1425 arguments[1] = v2;
1426 arguments[2] = v3;
1427 arguments[3] = v4;
1428
1429 /*
1430 * find the hash bucket in which to look for the tuple
1431 */
1432 hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
1433 hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1434
1435 /*
1436 * scan the hash bucket until we find a match or exhaust our tuples
1437 *
1438 * Note: it's okay to use dlist_foreach here, even though we modify the
1439 * dlist within the loop, because we don't continue the loop afterwards.
1440 */
1441 bucket = &cache->cc_bucket[hashIndex];
1442 dlist_foreach(iter, bucket)
1443 {
1444 ct = dlist_container(CatCTup, cache_elem, iter.cur);
1445
1446 if (ct->dead)
1447 continue; /* ignore dead entries */
1448
1449 if (ct->hash_value != hashValue)
1450 continue; /* quickly skip entry if wrong hash val */
1451
1452 if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
1453 continue;
1454
1455 /*
1456 * We found a match in the cache. Move it to the front of the list
1457 * for its hashbucket, in order to speed subsequent searches. (The
1458 * most frequently accessed elements in any hashbucket will tend to be
1459 * near the front of the hashbucket's list.)
1460 */
1461 dlist_move_head(bucket, &ct->cache_elem);
1462
1463 /*
1464 * If it's a positive entry, bump its refcount and return it. If it's
1465 * negative, we can report failure to the caller.
1466 */
1467 if (!ct->negative)
1468 {
1469 ResourceOwnerEnlarge(CurrentResourceOwner);
1470 ct->refcount++;
1471 ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1472
1473 CACHE_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
1474 cache->cc_relname, hashIndex);
1475
1476#ifdef CATCACHE_STATS
1477 cache->cc_hits++;
1478#endif
1479
1480 return &ct->tuple;
1481 }
1482 else
1483 {
1484 CACHE_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
1485 cache->cc_relname, hashIndex);
1486
1487#ifdef CATCACHE_STATS
1488 cache->cc_neg_hits++;
1489#endif
1490
1491 return NULL;
1492 }
1493 }
1494
1495 return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4);
1496}
1497
1498/*
1499 * Search the actual catalogs, rather than the cache.
1500 *
1501 * This is kept separate from SearchCatCacheInternal() to keep the fast-path
1502 * as small as possible. To avoid that effort being undone by a helpful
1503 * compiler, try to explicitly forbid inlining.
1504 */
1505static pg_noinline HeapTuple
1506SearchCatCacheMiss(CatCache *cache,
1507 int nkeys,
1508 uint32 hashValue,
1509 Index hashIndex,
1510 Datum v1,
1511 Datum v2,
1512 Datum v3,
1513 Datum v4)
1514{
1515 ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1516 Relation relation;
1517 SysScanDesc scandesc;
1518 HeapTuple ntp;
1519 CatCTup *ct;
1520 bool stale;
1521 Datum arguments[CATCACHE_MAXKEYS];
1522
1523 /* Initialize local parameter array */
1524 arguments[0] = v1;
1525 arguments[1] = v2;
1526 arguments[2] = v3;
1527 arguments[3] = v4;
1528
1529 /*
1530 * Tuple was not found in cache, so we have to try to retrieve it directly
1531 * from the relation. If found, we will add it to the cache; if not
1532 * found, we will add a negative cache entry instead.
1533 *
1534 * NOTE: it is possible for recursive cache lookups to occur while reading
1535 * the relation --- for example, due to shared-cache-inval messages being
1536 * processed during table_open(). This is OK. It's even possible for one
1537 * of those lookups to find and enter the very same tuple we are trying to
1538 * fetch here. If that happens, we will enter a second copy of the tuple
1539 * into the cache. The first copy will never be referenced again, and
1540 * will eventually age out of the cache, so there's no functional problem.
1541 * This case is rare enough that it's not worth expending extra cycles to
1542 * detect.
1543 *
1544 * Another case, which we *must* handle, is that the tuple could become
1545 * outdated during CatalogCacheCreateEntry's attempt to detoast it (since
1546 * AcceptInvalidationMessages can run during TOAST table access). We do
1547 * not want to return already-stale catcache entries, so we loop around
1548 * and do the table scan again if that happens.
1549 */
1550 relation = table_open(cache->cc_reloid, AccessShareLock);
1551
1552 /*
1553 * Ok, need to make a lookup in the relation, copy the scankey and fill
1554 * out any per-call fields.
1555 */
1556 memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
1557 cur_skey[0].sk_argument = v1;
1558 cur_skey[1].sk_argument = v2;
1559 cur_skey[2].sk_argument = v3;
1560 cur_skey[3].sk_argument = v4;
1561
1562 do
1563 {
1564 scandesc = systable_beginscan(relation,
1565 cache->cc_indexoid,
1566 IndexScanOK(cache),
1567 NULL,
1568 nkeys,
1569 cur_skey);
1570
1571 ct = NULL;
1572 stale = false;
1573
1574 while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1575 {
1576 ct = CatalogCacheCreateEntry(cache, ntp, NULL,
1577 hashValue, hashIndex);
1578 /* upon failure, we must start the scan over */
1579 if (ct == NULL)
1580 {
1581 stale = true;
1582 break;
1583 }
1584 /* immediately set the refcount to 1 */
1585 ResourceOwnerEnlarge(CurrentResourceOwner);
1586 ct->refcount++;
1587 ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1588 break; /* assume only one match */
1589 }
1590
1591 systable_endscan(scandesc);
1592 } while (stale);
1593
1594 table_close(relation, AccessShareLock);
1595
1596 /*
1597 * If tuple was not found, we need to build a negative cache entry
1598 * containing a fake tuple. The fake tuple has the correct key columns,
1599 * but nulls everywhere else.
1600 *
1601 * In bootstrap mode, we don't build negative entries, because the cache
1602 * invalidation mechanism isn't alive and can't clear them if the tuple
1603 * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
1604 * cache inval for that.)
1605 */
1606 if (ct == NULL)
1607 {
1608 if (IsBootstrapProcessingMode())
1609 return NULL;
1610
1611 ct = CatalogCacheCreateEntry(cache, NULL, arguments,
1612 hashValue, hashIndex);
1613
1614 /* Creating a negative cache entry shouldn't fail */
1615 Assert(ct != NULL);
1616
1617 CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1618 cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1619 CACHE_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
1620 cache->cc_relname, hashIndex);
1621
1622 /*
1623 * We are not returning the negative entry to the caller, so leave its
1624 * refcount zero.
1625 */
1626
1627 return NULL;
1628 }
1629
1630 CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1631 cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1632 CACHE_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
1633 cache->cc_relname, hashIndex);
1634
1635#ifdef CATCACHE_STATS
1636 cache->cc_newloads++;
1637#endif
1638
1639 return &ct->tuple;
1640}
1641
1642/*
1643 * ReleaseCatCache
1644 *
1645 * Decrement the reference count of a catcache entry (releasing the
1646 * hold grabbed by a successful SearchCatCache).
1647 *
1648 * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1649 * will be freed as soon as their refcount goes to zero. In combination
1650 * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1651 * to catch references to already-released catcache entries.
1652 */
1653void
1654ReleaseCatCache(HeapTuple tuple)
1655{
1656 ReleaseCatCacheWithOwner(tuple, CurrentResourceOwner);
1657}
1658
1659static void
1660ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner)
1661{
1662 CatCTup *ct = (CatCTup *) (((char *) tuple) -
1663 offsetof(CatCTup, tuple));
1664
1665 /* Safety checks to ensure we were handed a cache entry */
1666 Assert(ct->ct_magic == CT_MAGIC);
1667 Assert(ct->refcount > 0);
1668
1669 ct->refcount--;
1670 if (resowner)
1671 ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);
1672
1673 if (
1674#ifndef CATCACHE_FORCE_RELEASE
1675 ct->dead &&
1676#endif
1677 ct->refcount == 0 &&
1678 (ct->c_list == NULL || ct->c_list->refcount == 0))
1679 CatCacheRemoveCTup(ct->my_cache, ct);
1680}
1681
1682
1683/*
1684 * GetCatCacheHashValue
1685 *
1686 * Compute the hash value for a given set of search keys.
1687 *
1688 * The reason for exposing this as part of the API is that the hash value is
1689 * exposed in cache invalidation operations, so there are places outside the
1690 * catcache code that need to be able to compute the hash values.
1691 */
1692uint32
1694 Datum v1,
1695 Datum v2,
1696 Datum v3,
1697 Datum v4)
1698{
1699 /*
1700 * one-time startup overhead for each cache
1701 */
1702 ConditionalCatalogCacheInitializeCache(cache);
1703
1704 /*
1705 * calculate the hash value
1706 */
1707 return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4);
1708}
1709
1710
1711/*
1712 * SearchCatCacheList
1713 *
1714 * Generate a list of all tuples matching a partial key (that is,
1715 * a key specifying just the first K of the cache's N key columns).
1716 *
1717 * It doesn't make any sense to specify all of the cache's key columns
1718 * here: since the key is unique, there could be at most one match, so
1719 * you ought to use SearchCatCache() instead. Hence this function takes
1720 * one fewer Datum argument than SearchCatCache() does.
1721 *
1722 * The caller must not modify the list object or the pointed-to tuples,
1723 * and must call ReleaseCatCacheList() when done with the list.
1724 */
1725CatCList *
1727 int nkeys,
1728 Datum v1,
1729 Datum v2,
1730 Datum v3)
1731{
1732 Datum v4 = 0; /* dummy last-column value */
1733 Datum arguments[CATCACHE_MAXKEYS];
1734 uint32 lHashValue;
1735 Index lHashIndex;
1736 dlist_iter iter;
1737 dlist_head *lbucket;
1738 CatCList *cl;
1739 CatCTup *ct;
1740 List *volatile ctlist;
1741 ListCell *ctlist_item;
1742 int nmembers;
1743 bool ordered;
1744 HeapTuple ntp;
1745 MemoryContext oldcxt;
1746 int i;
1747 CatCInProgress *save_in_progress;
1748 CatCInProgress in_progress_ent;
1749
1750 /*
1751 * one-time startup overhead for each cache
1752 */
1753 ConditionalCatalogCacheInitializeCache(cache);
1754
1755 Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1756
1757#ifdef CATCACHE_STATS
1758 cache->cc_lsearches++;
1759#endif
1760
1761 /* Initialize local parameter array */
1762 arguments[0] = v1;
1763 arguments[1] = v2;
1764 arguments[2] = v3;
1765 arguments[3] = v4;
1766
1767 /*
1768 * If we haven't previously done a list search in this cache, create the
1769 * bucket header array; otherwise, consider whether it's time to enlarge
1770 * it.
1771 */
1772 if (cache->cc_lbucket == NULL)
1773 {
1774 /* Arbitrary initial size --- must be a power of 2 */
1775 int nbuckets = 16;
1776
1777 cache->cc_lbucket = (dlist_head *)
1778 MemoryContextAllocZero(CacheMemoryContext,
1779 nbuckets * sizeof(dlist_head));
1780 /* Don't set cc_nlbuckets if we get OOM allocating cc_lbucket */
1781 cache->cc_nlbuckets = nbuckets;
1782 }
1783 else
1784 {
1785 /*
1786 * If the hash table has become too full, enlarge the buckets array.
1787 * Quite arbitrarily, we enlarge when fill factor > 2.
1788 */
1789 if (cache->cc_nlist > cache->cc_nlbuckets * 2)
1790 RehashCatCacheLists(cache);
1791 }
1792
1793 /*
1794 * Find the hash bucket in which to look for the CatCList.
1795 */
1796 lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
1797 lHashIndex = HASH_INDEX(lHashValue, cache->cc_nlbuckets);
1798
1799 /*
1800 * scan the items until we find a match or exhaust our list
1801 *
1802 * Note: it's okay to use dlist_foreach here, even though we modify the
1803 * dlist within the loop, because we don't continue the loop afterwards.
1804 */
1805 lbucket = &cache->cc_lbucket[lHashIndex];
1806 dlist_foreach(iter, lbucket)
1807 {
1808 cl = dlist_container(CatCList, cache_elem, iter.cur);
1809
1810 if (cl->dead)
1811 continue; /* ignore dead entries */
1812
1813 if (cl->hash_value != lHashValue)
1814 continue; /* quickly skip entry if wrong hash val */
1815
1816 /*
1817 * see if the cached list matches our key.
1818 */
1819 if (cl->nkeys != nkeys)
1820 continue;
1821
1822 if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments))
1823 continue;
1824
1825 /*
1826 * We found a matching list. Move the list to the front of the list
1827 * for its hashbucket, so as to speed subsequent searches. (We do not
1828 * move the members to the fronts of their hashbucket lists, however,
1829 * since there's no point in that unless they are searched for
1830 * individually.)
1831 */
1832 dlist_move_head(lbucket, &cl->cache_elem);
1833
1834 /* Bump the list's refcount and return it */
1835 ResourceOwnerEnlarge(CurrentResourceOwner);
1836 cl->refcount++;
1837 ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1838
1839 CACHE_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1840 cache->cc_relname);
1841
1842#ifdef CATCACHE_STATS
1843 cache->cc_lhits++;
1844#endif
1845
1846 return cl;
1847 }
1848
1849 /*
1850 * List was not found in cache, so we have to build it by reading the
1851 * relation. For each matching tuple found in the relation, use an
1852 * existing cache entry if possible, else build a new one.
1853 *
1854 * We have to bump the member refcounts temporarily to ensure they won't
1855 * get dropped from the cache while loading other members. We use a PG_TRY
1856 * block to ensure we can undo those refcounts if we get an error before
1857 * we finish constructing the CatCList. ctlist must be valid throughout
1858 * the PG_TRY block.
1859 */
1860 ctlist = NIL;
1861
1862 /*
1863 * Cache invalidation can happen while we're building the list.
1864 * CatalogCacheCreateEntry() handles concurrent invalidation of individual
1865 * tuples, but it's also possible that a new entry is concurrently added
1866 * that should be part of the list we're building. Register an
1867 * "in-progress" entry that will receive the invalidation, until we have
1868 * built the final list entry.
1869 */
1870 save_in_progress = catcache_in_progress_stack;
1871 in_progress_ent.next = catcache_in_progress_stack;
1872 in_progress_ent.cache = cache;
1873 in_progress_ent.hash_value = lHashValue;
1874 in_progress_ent.list = true;
1875 in_progress_ent.dead = false;
1876 catcache_in_progress_stack = &in_progress_ent;
1877
1878 PG_TRY();
1879 {
1880 ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1881 Relation relation;
1882 SysScanDesc scandesc;
1883 bool first_iter = true;
1884
1885 relation = table_open(cache->cc_reloid, AccessShareLock);
1886
1887 /*
1888 * Ok, need to make a lookup in the relation, copy the scankey and
1889 * fill out any per-call fields.
1890 */
1891 memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
1892 cur_skey[0].sk_argument = v1;
1893 cur_skey[1].sk_argument = v2;
1894 cur_skey[2].sk_argument = v3;
1895 cur_skey[3].sk_argument = v4;
1896
1897 /*
1898 * Scan the table for matching entries. If an invalidation arrives
1899 * mid-build, we will loop back here to retry.
1900 */
1901 do
1902 {
1903 /*
1904 * If we are retrying, release refcounts on any items created on
1905 * the previous iteration. We dare not try to free them if
1906 * they're now unreferenced, since an error while doing that would
1907 * result in the PG_CATCH below doing extra refcount decrements.
1908 * Besides, we'll likely re-adopt those items in the next
1909 * iteration, so it's not worth complicating matters to try to get
1910 * rid of them.
1911 */
1912 foreach(ctlist_item, ctlist)
1913 {
1914 ct = (CatCTup *) lfirst(ctlist_item);
1915 Assert(ct->c_list == NULL);
1916 Assert(ct->refcount > 0);
1917 ct->refcount--;
1918 }
1919 /* Reset ctlist in preparation for new try */
1920 ctlist = NIL;
1921 in_progress_ent.dead = false;
1922
1923 scandesc = systable_beginscan(relation,
1924 cache->cc_indexoid,
1925 IndexScanOK(cache),
1926 NULL,
1927 nkeys,
1928 cur_skey);
1929
1930 /* The list will be ordered iff we are doing an index scan */
1931 ordered = (scandesc->irel != NULL);
1932
1933 /* Injection point to help test the recursive invalidation case */
1934 if (first_iter)
1935 {
1936 INJECTION_POINT("catcache-list-miss-systable-scan-started", NULL);
1937 first_iter = false;
1938 }
1939
1940 while (HeapTupleIsValid(ntp = systable_getnext(scandesc)) &&
1941 !in_progress_ent.dead)
1942 {
1943 uint32 hashValue;
1944 Index hashIndex;
1945 bool found = false;
1946 dlist_head *bucket;
1947
1948 /*
1949 * See if there's an entry for this tuple already.
1950 */
1951 ct = NULL;
1952 hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
1953 hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1954
1955 bucket = &cache->cc_bucket[hashIndex];
1956 dlist_foreach(iter, bucket)
1957 {
1958 ct = dlist_container(CatCTup, cache_elem, iter.cur);
1959
1960 if (ct->dead || ct->negative)
1961 continue; /* ignore dead and negative entries */
1962
1963 if (ct->hash_value != hashValue)
1964 continue; /* quickly skip entry if wrong hash val */
1965
1966 if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1967 continue; /* not same tuple */
1968
1969 /*
1970 * Found a match, but can't use it if it belongs to
1971 * another list already
1972 */
1973 if (ct->c_list)
1974 continue;
1975
1976 found = true;
1977 break; /* A-OK */
1978 }
1979
1980 if (!found)
1981 {
1982 /* We didn't find a usable entry, so make a new one */
1983 ct = CatalogCacheCreateEntry(cache, ntp, NULL,
1984 hashValue, hashIndex);
1985
1986 /* upon failure, we must start the scan over */
1987 if (ct == NULL)
1988 {
1989 in_progress_ent.dead = true;
1990 break;
1991 }
1992 }
1993
1994 /* Careful here: add entry to ctlist, then bump its refcount */
1995 /* This way leaves state correct if lappend runs out of memory */
1996 ctlist = lappend(ctlist, ct);
1997 ct->refcount++;
1998 }
1999
2000 systable_endscan(scandesc);
2001 } while (in_progress_ent.dead);
2002
2003 table_close(relation, AccessShareLock);
2004
2005 /* Make sure the resource owner has room to remember this entry. */
2006 ResourceOwnerEnlarge(CurrentResourceOwner);
2007
2008 /* Now we can build the CatCList entry. */
2009 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2010 nmembers = list_length(ctlist);
2011 cl = (CatCList *)
2012 palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
2013
2014 /* Extract key values */
2015 CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno,
2016 arguments, cl->keys);
2017 MemoryContextSwitchTo(oldcxt);
2018
2019 /*
2020 * We are now past the last thing that could trigger an elog before we
2021 * have finished building the CatCList and remembering it in the
2022 * resource owner. So it's OK to fall out of the PG_TRY, and indeed
2023 * we'd better do so before we start marking the members as belonging
2024 * to the list.
2025 */
2026 }
2027 PG_CATCH();
2028 {
2029 Assert(catcache_in_progress_stack == &in_progress_ent);
2030 catcache_in_progress_stack = save_in_progress;
2031
2032 foreach(ctlist_item, ctlist)
2033 {
2034 ct = (CatCTup *) lfirst(ctlist_item);
2035 Assert(ct->c_list == NULL);
2036 Assert(ct->refcount > 0);
2037 ct->refcount--;
2038 if (
2039#ifndef CATCACHE_FORCE_RELEASE
2040 ct->dead &&
2041#endif
2042 ct->refcount == 0 &&
2043 (ct->c_list == NULL || ct->c_list->refcount == 0))
2044 CatCacheRemoveCTup(ct->my_cache, ct);
2045 }
2046
2047 PG_RE_THROW();
2048 }
2049 PG_END_TRY();
2050 Assert(catcache_in_progress_stack == &in_progress_ent);
2051 catcache_in_progress_stack = save_in_progress;
2052
2053 cl->cl_magic = CL_MAGIC;
2054 cl->my_cache = cache;
2055 cl->refcount = 0; /* for the moment */
2056 cl->dead = false;
2057 cl->ordered = ordered;
2058 cl->nkeys = nkeys;
2059 cl->hash_value = lHashValue;
2060 cl->n_members = nmembers;
2061
2062 i = 0;
2063 foreach(ctlist_item, ctlist)
2064 {
2065 cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
2066 Assert(ct->c_list == NULL);
2067 ct->c_list = cl;
2068 /* release the temporary refcount on the member */
2069 Assert(ct->refcount > 0);
2070 ct->refcount--;
2071 /* mark list dead if any members already dead */
2072 if (ct->dead)
2073 cl->dead = true;
2074 }
2075 Assert(i == nmembers);
2076
2077 /*
2078 * Add the CatCList to the appropriate bucket, and count it.
2079 */
2080 dlist_push_head(lbucket, &cl->cache_elem);
2081
2082 cache->cc_nlist++;
2083
2084 /* Finally, bump the list's refcount and return it */
2085 cl->refcount++;
2086 ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
2087
2088 CACHE_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
2089 cache->cc_relname, nmembers);
2090
2091 return cl;
2092}
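/*
 * Editor's example (illustrative): list searches follow the same pattern
 * through the syscache wrappers, e.g. to visit all pg_proc entries with a
 * given name:
 *
 *	CatCList   *catlist = SearchSysCacheList1(PROCNAMEARGSNSP,
 *											  CStringGetDatum(funcname));
 *
 *	for (int i = 0; i < catlist->n_members; i++)
 *	{
 *		HeapTuple	proctup = &catlist->members[i]->tuple;
 *
 *		... inspect proctup, but do not modify it ...
 *	}
 *	ReleaseSysCacheList(catlist);
 */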
2093
2094/*
2095 * ReleaseCatCacheList
2096 *
2097 * Decrement the reference count of a catcache list.
2098 */
2099void
2100ReleaseCatCacheList(CatCList *list)
2101{
2102 ReleaseCatCacheListWithOwner(list, CurrentResourceOwner);
2103}
2104
2105static void
2106ReleaseCatCacheListWithOwner(CatCList *list, ResourceOwner resowner)
2107{
2108 /* Safety checks to ensure we were handed a cache entry */
2109 Assert(list->cl_magic == CL_MAGIC);
2110 Assert(list->refcount > 0);
2111 list->refcount--;
2112 if (resowner)
2113 ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
2114
2115 if (
2116#ifndef CATCACHE_FORCE_RELEASE
2117 list->dead &&
2118#endif
2119 list->refcount == 0)
2120 CatCacheRemoveCList(list->my_cache, list);
2121}
2122
2123
2124/*
2125 * CatalogCacheCreateEntry
2126 * Create a new CatCTup entry, copying the given HeapTuple and other
2127 * supplied data into it. The new entry initially has refcount 0.
2128 *
2129 * To create a normal cache entry, ntp must be the HeapTuple just fetched
2130 * from scandesc, and "arguments" is not used. To create a negative cache
2131 * entry, pass NULL for ntp; then "arguments" is the cache keys to use.
2132 * In either case, hashValue/hashIndex are the hash values computed from
2133 * the cache keys.
2134 *
2135 * Returns NULL if we attempt to detoast the tuple and observe that it
2136 * became stale. (This cannot happen for a negative entry.) Caller must
2137 * retry the tuple lookup in that case.
2138 */
2139static CatCTup *
2140CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
2141 uint32 hashValue, Index hashIndex)
2142{
2143 CatCTup *ct;
2144 MemoryContext oldcxt;
2145
2146 if (ntp)
2147 {
2148 int i;
2149 HeapTuple dtp = NULL;
2150
2151 /*
2152 * The invalidation of the in-progress entry essentially never happens
2153 * during our regression tests, and there's no easy way to force it to
2154 * fail for testing purposes. To ensure we have test coverage for the
2155 * retry paths in our callers, make debug builds randomly fail about
2156 * 0.1% of the times through this code path, even when there's no
2157 * toasted fields.
2158 */
2159#ifdef USE_ASSERT_CHECKING
2160 if (pg_prng_double(&pg_global_prng_state) < 0.001)
2161 return NULL;
2162#endif
2163
2164 /*
2165 * If there are any out-of-line toasted fields in the tuple, expand
2166 * them in-line. This saves cycles during later use of the catcache
2167 * entry, and also protects us against the possibility of the toast
2168 * tuples being freed before we attempt to fetch them, in case of
2169 * something using a slightly stale catcache entry.
2170 */
2171 if (HeapTupleHasExternal(ntp))
2172 {
2173 CatCInProgress *save_in_progress;
2174 CatCInProgress in_progress_ent;
2175
2176 /*
2177 * The tuple could become stale while we are doing toast table
2178 * access (since AcceptInvalidationMessages can run then). The
2179 * invalidation will mark our in-progress entry as dead.
2180 */
2181 save_in_progress = catcache_in_progress_stack;
2182 in_progress_ent.next = catcache_in_progress_stack;
2183 in_progress_ent.cache = cache;
2184 in_progress_ent.hash_value = hashValue;
2185 in_progress_ent.list = false;
2186 in_progress_ent.dead = false;
2187 catcache_in_progress_stack = &in_progress_ent;
2188
2189 PG_TRY();
2190 {
2191 dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
2192 }
2193 PG_FINALLY();
2194 {
2195 Assert(catcache_in_progress_stack == &in_progress_ent);
2196 catcache_in_progress_stack = save_in_progress;
2197 }
2198 PG_END_TRY();
2199
2200 if (in_progress_ent.dead)
2201 {
2202 heap_freetuple(dtp);
2203 return NULL;
2204 }
2205 }
2206 else
2207 dtp = ntp;
2208
2209 /* Allocate memory for CatCTup and the cached tuple in one go */
2210 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2211
2212 ct = (CatCTup *) palloc(sizeof(CatCTup) +
2213 MAXIMUM_ALIGNOF + dtp->t_len);
2214 ct->tuple.t_len = dtp->t_len;
2215 ct->tuple.t_self = dtp->t_self;
2216 ct->tuple.t_tableOid = dtp->t_tableOid;
2217 ct->tuple.t_data = (HeapTupleHeader)
2218 MAXALIGN(((char *) ct) + sizeof(CatCTup));
2219 /* copy tuple contents */
2220 memcpy((char *) ct->tuple.t_data,
2221 (const char *) dtp->t_data,
2222 dtp->t_len);
2223 MemoryContextSwitchTo(oldcxt);
2224
2225 if (dtp != ntp)
2226 heap_freetuple(dtp);
2227
2228 /* extract keys - they'll point into the tuple if not by-value */
2229 for (i = 0; i < cache->cc_nkeys; i++)
2230 {
2231 Datum atp;
2232 bool isnull;
2233
2234 atp = heap_getattr(&ct->tuple,
2235 cache->cc_keyno[i],
2236 cache->cc_tupdesc,
2237 &isnull);
2238 Assert(!isnull);
2239 ct->keys[i] = atp;
2240 }
2241 }
2242 else
2243 {
2244 /* Set up keys for a negative cache entry */
2245 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2246 ct = (CatCTup *) palloc(sizeof(CatCTup));
2247
2248 /*
2249 * Store keys - they'll point into separately allocated memory if not
2250 * by-value.
2251 */
2252 CatCacheCopyKeys(cache->cc_tupdesc, cache->cc_nkeys, cache->cc_keyno,
2253 arguments, ct->keys);
2254 MemoryContextSwitchTo(oldcxt);
2255 }
2256
2257 /*
2258 * Finish initializing the CatCTup header, and add it to the cache's
2259 * linked list and counts.
2260 */
2261 ct->ct_magic = CT_MAGIC;
2262 ct->my_cache = cache;
2263 ct->c_list = NULL;
2264 ct->refcount = 0; /* for the moment */
2265 ct->dead = false;
2266 ct->negative = (ntp == NULL);
2267 ct->hash_value = hashValue;
2268
2269 dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);
2270
2271 cache->cc_ntup++;
2272 CacheHdr->ch_ntup++;
2273
2274 /*
2275 * If the hash table has become too full, enlarge the buckets array. Quite
2276 * arbitrarily, we enlarge when fill factor > 2.
2277 */
2278 if (cache->cc_ntup > cache->cc_nbuckets * 2)
2279 RehashCatCache(cache);
2280
2281 return ct;
2282}
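/*
 * [Editor's sketch, condensed from SearchCatCacheMiss()] Callers react to
 * the NULL result ("tuple went stale during detoasting") by redoing the
 * index scan until the entry can be built:
 *
 *     do
 *     {
 *         scandesc = systable_beginscan(relation, cache->cc_indexoid,
 *                                       IndexScanOK(cache), NULL,
 *                                       nkeys, cur_skey);
 *         stale = false;
 *         while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
 *         {
 *             ct = CatalogCacheCreateEntry(cache, ntp, NULL,
 *                                          hashValue, hashIndex);
 *             if (ct == NULL)
 *             {
 *                 stale = true;
 *                 break;
 *             }
 *             break;          ... assume only one match ...
 *         }
 *         systable_endscan(scandesc);
 *     } while (stale);
 */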
2283
2284/*
2285 * Helper routine that frees keys stored in the keys array.
2286 */
2287static void
2288CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, const int *attnos, const Datum *keys)
2289{
2290 int i;
2291
2292 for (i = 0; i < nkeys; i++)
2293 {
2294 int attnum = attnos[i];
2295
2296 /* system attributes are not supported in caches */
2297 Assert(attnum > 0);
2298
2299 if (!TupleDescCompactAttr(tupdesc, attnum - 1)->attbyval)
2300 pfree(DatumGetPointer(keys[i]));
2301 }
2302}
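/*
 * [Editor's example] Only pass-by-reference keys own separately palloc'd
 * storage. For a cache keyed on, say, (oid, name):
 *
 *     keys[0] = ObjectIdGetDatum(...);   attbyval, nothing to free
 *     keys[1] = NameGetDatum(...);       points into palloc'd memory, so
 *                                        pfree(DatumGetPointer(keys[1]))
 */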
2303
2304/*
2305 * Helper routine that copies the keys in the srckeys array into the dstkeys
2306 * one, guaranteeing that the datums are fully allocated in the current memory
2307 * context.
2308 */
2309static void
2310CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, const int *attnos,
2311 const Datum *srckeys, Datum *dstkeys)
2312{
2313 int i;
2314
2315 /*
2316 * XXX: memory and lookup performance could possibly be improved by
2317 * storing all keys in one allocation.
2318 */
2319
2320 for (i = 0; i < nkeys; i++)
2321 {
2322 int attnum = attnos[i];
2323 Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
2324 Datum src = srckeys[i];
2325 NameData srcname;
2326
2327 /*
2328 * Must be careful in case the caller passed a C string where a NAME
2329 * is wanted: convert the given argument to a correctly padded NAME.
2330 * Otherwise the memcpy() done by datumCopy() could fall off the end
2331 * of memory.
2332 */
2333 if (att->atttypid == NAMEOID)
2334 {
2335 namestrcpy(&srcname, DatumGetCString(src));
2336 src = NameGetDatum(&srcname);
2337 }
2338
2339 dstkeys[i] = datumCopy(src,
2340 att->attbyval,
2341 att->attlen);
2342 }
2343}
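/*
 * [Editor's illustration of the NAME hazard above] Syscache callers
 * routinely pass a bare C string where the catalog column is of type NAME,
 * in the style of RelnameGetRelid() in namespace.c:
 *
 *     GetSysCacheOid2(RELNAMENSP, Anum_pg_class_oid,
 *                     PointerGetDatum(relname),
 *                     ObjectIdGetDatum(namespaceId));
 *
 * Copying attlen = NAMEDATALEN bytes straight from such a datum could read
 * past the end of the string's allocation, so the code above first makes a
 * correctly padded copy with namestrcpy().
 */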
2344
2345/*
2346 * PrepareToInvalidateCacheTuple()
2347 *
2348 * This is part of a rather subtle chain of events, so pay attention:
2349 *
2350 * When a tuple is inserted or deleted, it cannot be flushed from the
2351 * catcaches immediately, for reasons explained at the top of cache/inval.c.
2352 * Instead we have to add entries for the tuple to a list of pending tuple
2353 * invalidations that will be done at the end of the command or transaction.
2354 *
2355 * The lists of tuples that need to be flushed are kept by inval.c. This
2356 * routine is a helper routine for inval.c. Given a tuple belonging to
2357 * the specified relation, find all catcaches it could be in, compute the
2358 * correct hash value for each such catcache, and call the specified
2359 * function to record the cache id and hash value in inval.c's lists.
2360 * SysCacheInvalidate will be called later, if appropriate,
2361 * using the recorded information.
2362 *
2363 * For an insert or delete, tuple is the target tuple and newtuple is NULL.
2364 * For an update, we are called just once, with tuple being the old tuple
2365 * version and newtuple the new version. We should make two list entries
2366 * if the tuple's hash value changed, but only one if it didn't.
2367 *
2368 * Note that it is irrelevant whether the given tuple is actually loaded
2369 * into the catcache at the moment. Even if it's not there now, it might
2370 * be by the end of the command, or there might be a matching negative entry
2371 * to flush --- or other backends' caches might have such entries --- so
2372 * we have to make list entries to flush it later.
2373 *
2374 * Also note that it's not an error if there are no catcaches for the
2375 * specified relation. inval.c doesn't know exactly which rels have
2376 * catcaches --- it will call this routine for any tuple that's in a
2377 * system relation.
2378 */
2379 void
2380 PrepareToInvalidateCacheTuple(Relation relation,
2381 HeapTuple tuple,
2382 HeapTuple newtuple,
2383 void (*function) (int, uint32, Oid, void *),
2384 void *context)
2385{
2386 slist_iter iter;
2387 Oid reloid;
2388
2389 CACHE_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
2390
2391 /*
2392 * sanity checks
2393 */
2394 Assert(RelationIsValid(relation));
2395 Assert(HeapTupleIsValid(tuple));
2396 Assert(PointerIsValid(function));
2397 Assert(CacheHdr != NULL);
2398
2399 reloid = RelationGetRelid(relation);
2400
2401 /* ----------------
2402 * for each cache
2403 * if the cache contains tuples from the specified relation
2404 * compute the tuple's hash value(s) in this cache,
2405 * and call the passed function to register the information.
2406 * ----------------
2407 */
2408
2409 slist_foreach(iter, &CacheHdr->ch_caches)
2410 {
2411 CatCache *ccp = slist_container(CatCache, cc_next, iter.cur);
2412 uint32 hashvalue;
2413 Oid dbid;
2414
2415 if (ccp->cc_reloid != reloid)
2416 continue;
2417
2418 /* Just in case cache hasn't finished initialization yet... */
2419 ConditionalCatalogCacheInitializeCache(ccp);
2420
2421 hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
2422 dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
2423
2424 (*function) (ccp->id, hashvalue, dbid, context);
2425
2426 if (newtuple)
2427 {
2428 uint32 newhashvalue;
2429
2430 newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
2431
2432 if (newhashvalue != hashvalue)
2433 (*function) (ccp->id, newhashvalue, dbid, context);
2434 }
2435 }
2436}
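/*
 * [Editor's note] In practice the "function" callback is inval.c's
 * RegisterCatcacheInvalidation(), which (as of this writing) simply queues
 * the (cache id, hash value, database id) triple in the transaction's
 * pending-invalidation lists:
 *
 *     static void
 *     RegisterCatcacheInvalidation(int cacheId, uint32 hashValue,
 *                                  Oid dbId, void *context)
 *     {
 *         InvalidationInfo *info = (InvalidationInfo *) context;
 *
 *         AddCatcacheInvalidationMessage(&info->CurrentCmdInvalidMsgs,
 *                                        cacheId, hashValue, dbId);
 *     }
 */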
2437
2438/* ResourceOwner callbacks */
2439
2440 static void
2441 ResOwnerReleaseCatCache(Datum res)
2442 {
2443 ReleaseCatCache((HeapTuple) DatumGetPointer(res));
2444 }
2445
2446 static char *
2447 ResOwnerPrintCatCache(Datum res)
2448 {
2449 HeapTuple tuple = (HeapTuple) DatumGetPointer(res);
2450 CatCTup *ct = (CatCTup *) (((char *) tuple) -
2451 offsetof(CatCTup, tuple));
2452
2453 /* Safety check to ensure we were handed a cache entry */
2454 Assert(ct->ct_magic == CT_MAGIC);
2455
2456 return psprintf("cache %s (%d), tuple %u/%u has count %d",
2457 ct->my_cache->cc_relname, ct->my_cache->id,
2458 ItemPointerGetBlockNumber(&(tuple->t_self)),
2459 ItemPointerGetOffsetNumber(&(tuple->t_self)),
2460 ct->refcount);
2461}
2462
2463 static void
2464 ResOwnerReleaseCatCacheList(Datum res)
2465 {
2466 ReleaseCatCacheList((CatCList *) DatumGetPointer(res));
2467 }
2468
2469 static char *
2470 ResOwnerPrintCatCacheList(Datum res)
2471 {
2472 CatCList *list = (CatCList *) DatumGetPointer(res);
2473
2474 return psprintf("cache %s (%d), list %p has count %d",
2475 list->my_cache->cc_relname, list->my_cache->id,
2476 list, list->refcount);
2477}
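/*
 * [Editor's note] These callbacks are hooked into the resource-owner
 * machinery through the two ResourceOwnerDesc structs declared near the
 * top of this file; for plain tuples that is, roughly:
 *
 *     static const ResourceOwnerDesc catcache_resowner_desc =
 *     {
 *         .name = "catcache reference",
 *         .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
 *         .release_priority = RELEASE_PRIO_CATCACHE_REFS,
 *         .ReleaseResource = ResOwnerReleaseCatCache,
 *         .DebugPrint = ResOwnerPrintCatCache
 *     };
 *
 * so leaked references are released after locks during abort cleanup, and
 * leak warnings can be formatted with the DebugPrint functions above.
 */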
Definition: xact.c:388