Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * catcache.c
4 : * System catalog cache for tuples matching a key.
5 : *
6 : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/utils/cache/catcache.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : #include "postgres.h"
16 :
17 : #include "access/genam.h"
18 : #include "access/heaptoast.h"
19 : #include "access/relscan.h"
20 : #include "access/table.h"
21 : #include "access/xact.h"
22 : #include "catalog/catalog.h"
23 : #include "catalog/pg_collation.h"
24 : #include "catalog/pg_type.h"
25 : #include "common/hashfn.h"
26 : #include "common/pg_prng.h"
27 : #include "miscadmin.h"
28 : #include "port/pg_bitutils.h"
29 : #ifdef CATCACHE_STATS
30 : #include "storage/ipc.h" /* for on_proc_exit */
31 : #endif
32 : #include "storage/lmgr.h"
33 : #include "utils/builtins.h"
34 : #include "utils/catcache.h"
35 : #include "utils/datum.h"
36 : #include "utils/fmgroids.h"
37 : #include "utils/inval.h"
38 : #include "utils/memutils.h"
39 : #include "utils/rel.h"
40 : #include "utils/resowner.h"
41 : #include "utils/syscache.h"
42 :
43 :
44 : /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
45 :
46 : /*
47 : * Given a hash value and the size of the hash table, find the bucket
48 : * in which the hash value belongs. Since the hash table must contain
49 : * a power-of-2 number of elements, this is a simple bitmask.
50 : */
51 : #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
52 :
53 :
54 : /*
55 : * variables, macros and other stuff
56 : */
57 :
58 : #ifdef CACHEDEBUG
59 : #define CACHE_elog(...) elog(__VA_ARGS__)
60 : #else
61 : #define CACHE_elog(...)
62 : #endif
63 :
64 : /* Cache management header --- pointer is NULL until created */
65 : static CatCacheHeader *CacheHdr = NULL;
66 :
67 : static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
68 : int nkeys,
69 : Datum v1, Datum v2,
70 : Datum v3, Datum v4);
71 :
72 : static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache,
73 : int nkeys,
74 : uint32 hashValue,
75 : Index hashIndex,
76 : Datum v1, Datum v2,
77 : Datum v3, Datum v4);
78 :
79 : static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
80 : Datum v1, Datum v2, Datum v3, Datum v4);
81 : static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys,
82 : HeapTuple tuple);
83 : static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
84 : const Datum *cachekeys,
85 : const Datum *searchkeys);
86 :
87 : #ifdef CATCACHE_STATS
88 : static void CatCachePrintStats(int code, Datum arg);
89 : #endif
90 : static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
91 : static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
92 : static void RehashCatCache(CatCache *cp);
93 : static void RehashCatCacheLists(CatCache *cp);
94 : static void CatalogCacheInitializeCache(CatCache *cache);
95 : static CatCTup *CatalogCacheCreateEntry(CatCache *cache,
96 : HeapTuple ntp, SysScanDesc scandesc,
97 : Datum *arguments,
98 : uint32 hashValue, Index hashIndex);
99 :
100 : static void ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner);
101 : static void ReleaseCatCacheListWithOwner(CatCList *list, ResourceOwner resowner);
102 : static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
103 : Datum *keys);
104 : static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
105 : Datum *srckeys, Datum *dstkeys);
106 :
107 :
108 : /*
109 : * internal support functions
110 : */
111 :
112 : /* ResourceOwner callbacks to hold catcache references */
113 :
114 : static void ResOwnerReleaseCatCache(Datum res);
115 : static char *ResOwnerPrintCatCache(Datum res);
116 : static void ResOwnerReleaseCatCacheList(Datum res);
117 : static char *ResOwnerPrintCatCacheList(Datum res);
118 :
119 : static const ResourceOwnerDesc catcache_resowner_desc =
120 : {
121 : /* catcache references */
122 : .name = "catcache reference",
123 : .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
124 : .release_priority = RELEASE_PRIO_CATCACHE_REFS,
125 : .ReleaseResource = ResOwnerReleaseCatCache,
126 : .DebugPrint = ResOwnerPrintCatCache
127 : };
128 :
129 : static const ResourceOwnerDesc catlistref_resowner_desc =
130 : {
131 : /* catcache-list pins */
132 : .name = "catcache list reference",
133 : .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
134 : .release_priority = RELEASE_PRIO_CATCACHE_LIST_REFS,
135 : .ReleaseResource = ResOwnerReleaseCatCacheList,
136 : .DebugPrint = ResOwnerPrintCatCacheList
137 : };
138 :
139 : /* Convenience wrappers over ResourceOwnerRemember/Forget */
140 : static inline void
141 68711758 : ResourceOwnerRememberCatCacheRef(ResourceOwner owner, HeapTuple tuple)
142 : {
143 68711758 : ResourceOwnerRemember(owner, PointerGetDatum(tuple), &catcache_resowner_desc);
144 68711758 : }
145 : static inline void
146 68701538 : ResourceOwnerForgetCatCacheRef(ResourceOwner owner, HeapTuple tuple)
147 : {
148 68701538 : ResourceOwnerForget(owner, PointerGetDatum(tuple), &catcache_resowner_desc);
149 68701538 : }
150 : static inline void
151 3022246 : ResourceOwnerRememberCatCacheListRef(ResourceOwner owner, CatCList *list)
152 : {
153 3022246 : ResourceOwnerRemember(owner, PointerGetDatum(list), &catlistref_resowner_desc);
154 3022246 : }
155 : static inline void
156 3022210 : ResourceOwnerForgetCatCacheListRef(ResourceOwner owner, CatCList *list)
157 : {
158 3022210 : ResourceOwnerForget(owner, PointerGetDatum(list), &catlistref_resowner_desc);
159 3022210 : }
160 :
161 :
162 : /*
163 : * Hash and equality functions for system types that are used as cache key
164 : * fields. In some cases, we just call the regular SQL-callable functions for
165 : * the appropriate data type, but that tends to be a little slow, and the
166 : * speed of these functions is performance-critical. Therefore, for data
167 : * types that frequently occur as catcache keys, we hard-code the logic here.
168 : * Avoiding the overhead of DirectFunctionCallN(...) is a substantial win, and
169 : * in certain cases (like int4) we can adopt a faster hash algorithm as well.
170 : */
171 :
172 : static bool
173 4442766 : chareqfast(Datum a, Datum b)
174 : {
175 4442766 : return DatumGetChar(a) == DatumGetChar(b);
176 : }
177 :
178 : static uint32
179 5088770 : charhashfast(Datum datum)
180 : {
181 5088770 : return murmurhash32((int32) DatumGetChar(datum));
182 : }
183 :
184 : static bool
185 3391166 : nameeqfast(Datum a, Datum b)
186 : {
187 3391166 : char *ca = NameStr(*DatumGetName(a));
188 3391166 : char *cb = NameStr(*DatumGetName(b));
189 :
190 3391166 : return strncmp(ca, cb, NAMEDATALEN) == 0;
191 : }
192 :
193 : static uint32
194 7513544 : namehashfast(Datum datum)
195 : {
196 7513544 : char *key = NameStr(*DatumGetName(datum));
197 :
198 7513544 : return hash_any((unsigned char *) key, strlen(key));
199 : }
200 :
201 : static bool
202 6914628 : int2eqfast(Datum a, Datum b)
203 : {
204 6914628 : return DatumGetInt16(a) == DatumGetInt16(b);
205 : }
206 :
207 : static uint32
208 9413032 : int2hashfast(Datum datum)
209 : {
210 9413032 : return murmurhash32((int32) DatumGetInt16(datum));
211 : }
212 :
213 : static bool
214 80596708 : int4eqfast(Datum a, Datum b)
215 : {
216 80596708 : return DatumGetInt32(a) == DatumGetInt32(b);
217 : }
218 :
219 : static uint32
220 93598940 : int4hashfast(Datum datum)
221 : {
222 93598940 : return murmurhash32((int32) DatumGetInt32(datum));
223 : }
224 :
225 : static bool
226 166 : texteqfast(Datum a, Datum b)
227 : {
228 : /*
229 : * The use of DEFAULT_COLLATION_OID is fairly arbitrary here. We just
230 : * want to take the fast "deterministic" path in texteq().
231 : */
232 166 : return DatumGetBool(DirectFunctionCall2Coll(texteq, DEFAULT_COLLATION_OID, a, b));
233 : }
234 :
235 : static uint32
236 3312 : texthashfast(Datum datum)
237 : {
238 : /* analogously here as in texteqfast() */
239 3312 : return DatumGetInt32(DirectFunctionCall1Coll(hashtext, DEFAULT_COLLATION_OID, datum));
240 : }
241 :
242 : static bool
243 2530 : oidvectoreqfast(Datum a, Datum b)
244 : {
245 2530 : return DatumGetBool(DirectFunctionCall2(oidvectoreq, a, b));
246 : }
247 :
248 : static uint32
249 333714 : oidvectorhashfast(Datum datum)
250 : {
251 333714 : return DatumGetInt32(DirectFunctionCall1(hashoidvector, datum));
252 : }
253 :
254 : /* Lookup support functions for a type. */
255 : static void
256 890276 : GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
257 : {
258 890276 : switch (keytype)
259 : {
260 12098 : case BOOLOID:
261 12098 : *hashfunc = charhashfast;
262 12098 : *fasteqfunc = chareqfast;
263 12098 : *eqfunc = F_BOOLEQ;
264 12098 : break;
265 16308 : case CHAROID:
266 16308 : *hashfunc = charhashfast;
267 16308 : *fasteqfunc = chareqfast;
268 16308 : *eqfunc = F_CHAREQ;
269 16308 : break;
270 167568 : case NAMEOID:
271 167568 : *hashfunc = namehashfast;
272 167568 : *fasteqfunc = nameeqfast;
273 167568 : *eqfunc = F_NAMEEQ;
274 167568 : break;
275 53636 : case INT2OID:
276 53636 : *hashfunc = int2hashfast;
277 53636 : *fasteqfunc = int2eqfast;
278 53636 : *eqfunc = F_INT2EQ;
279 53636 : break;
280 11844 : case INT4OID:
281 11844 : *hashfunc = int4hashfast;
282 11844 : *fasteqfunc = int4eqfast;
283 11844 : *eqfunc = F_INT4EQ;
284 11844 : break;
285 5390 : case TEXTOID:
286 5390 : *hashfunc = texthashfast;
287 5390 : *fasteqfunc = texteqfast;
288 5390 : *eqfunc = F_TEXTEQ;
289 5390 : break;
290 610464 : case OIDOID:
291 : case REGPROCOID:
292 : case REGPROCEDUREOID:
293 : case REGOPEROID:
294 : case REGOPERATOROID:
295 : case REGCLASSOID:
296 : case REGTYPEOID:
297 : case REGCOLLATIONOID:
298 : case REGCONFIGOID:
299 : case REGDICTIONARYOID:
300 : case REGROLEOID:
301 : case REGNAMESPACEOID:
302 610464 : *hashfunc = int4hashfast;
303 610464 : *fasteqfunc = int4eqfast;
304 610464 : *eqfunc = F_OIDEQ;
305 610464 : break;
306 12968 : case OIDVECTOROID:
307 12968 : *hashfunc = oidvectorhashfast;
308 12968 : *fasteqfunc = oidvectoreqfast;
309 12968 : *eqfunc = F_OIDVECTOREQ;
310 12968 : break;
311 0 : default:
312 0 : elog(FATAL, "type %u not supported as catcache key", keytype);
313 : *hashfunc = NULL; /* keep compiler quiet */
314 :
315 : *eqfunc = InvalidOid;
316 : break;
317 : }
318 890276 : }
319 :
320 : /*
321 : * CatalogCacheComputeHashValue
322 : *
323 : * Compute the hash value associated with a given set of lookup keys
324 : */
325 : static uint32
326 82566442 : CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
327 : Datum v1, Datum v2, Datum v3, Datum v4)
328 : {
329 82566442 : uint32 hashValue = 0;
330 : uint32 oneHash;
331 82566442 : CCHashFN *cc_hashfunc = cache->cc_hashfunc;
332 :
333 : CACHE_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
334 : cache->cc_relname, nkeys, cache);
335 :
336 82566442 : switch (nkeys)
337 : {
338 3807102 : case 4:
339 3807102 : oneHash = (cc_hashfunc[3]) (v4);
340 3807102 : hashValue ^= pg_rotate_left32(oneHash, 24);
341 : /* FALLTHROUGH */
342 9581386 : case 3:
343 9581386 : oneHash = (cc_hashfunc[2]) (v3);
344 9581386 : hashValue ^= pg_rotate_left32(oneHash, 16);
345 : /* FALLTHROUGH */
346 19996382 : case 2:
347 19996382 : oneHash = (cc_hashfunc[1]) (v2);
348 19996382 : hashValue ^= pg_rotate_left32(oneHash, 8);
349 : /* FALLTHROUGH */
350 82566442 : case 1:
351 82566442 : oneHash = (cc_hashfunc[0]) (v1);
352 82566442 : hashValue ^= oneHash;
353 82566442 : break;
354 0 : default:
355 0 : elog(FATAL, "wrong number of hash keys: %d", nkeys);
356 : break;
357 : }
358 :
359 82566442 : return hashValue;
360 : }
361 :
362 : /*
363 : * CatalogCacheComputeTupleHashValue
364 : *
365 : * Compute the hash value associated with a given tuple to be cached
366 : */
367 : static uint32
368 5804460 : CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple)
369 : {
370 5804460 : Datum v1 = 0,
371 5804460 : v2 = 0,
372 5804460 : v3 = 0,
373 5804460 : v4 = 0;
374 5804460 : bool isNull = false;
375 5804460 : int *cc_keyno = cache->cc_keyno;
376 5804460 : TupleDesc cc_tupdesc = cache->cc_tupdesc;
377 :
378 : /* Now extract key fields from tuple, insert into scankey */
379 5804460 : switch (nkeys)
380 : {
381 389854 : case 4:
382 389854 : v4 = fastgetattr(tuple,
383 389854 : cc_keyno[3],
384 : cc_tupdesc,
385 : &isNull);
386 : Assert(!isNull);
387 : /* FALLTHROUGH */
388 1074902 : case 3:
389 1074902 : v3 = fastgetattr(tuple,
390 1074902 : cc_keyno[2],
391 : cc_tupdesc,
392 : &isNull);
393 : Assert(!isNull);
394 : /* FALLTHROUGH */
395 4374242 : case 2:
396 4374242 : v2 = fastgetattr(tuple,
397 4374242 : cc_keyno[1],
398 : cc_tupdesc,
399 : &isNull);
400 : Assert(!isNull);
401 : /* FALLTHROUGH */
402 5804460 : case 1:
403 5804460 : v1 = fastgetattr(tuple,
404 : cc_keyno[0],
405 : cc_tupdesc,
406 : &isNull);
407 : Assert(!isNull);
408 5804460 : break;
409 0 : default:
410 0 : elog(FATAL, "wrong number of hash keys: %d", nkeys);
411 : break;
412 : }
413 :
414 5804460 : return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
415 : }
416 :
417 : /*
418 : * CatalogCacheCompareTuple
419 : *
420 : * Compare a tuple to the passed arguments.
421 : */
422 : static inline bool
423 71640340 : CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
424 : const Datum *cachekeys,
425 : const Datum *searchkeys)
426 : {
427 71640340 : const CCFastEqualFN *cc_fastequal = cache->cc_fastequal;
428 : int i;
429 :
430 166988304 : for (i = 0; i < nkeys; i++)
431 : {
432 95347964 : if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i]))
433 0 : return false;
434 : }
435 71640340 : return true;
436 : }
437 :
438 :
439 : #ifdef CATCACHE_STATS
440 :
441 : static void
442 : CatCachePrintStats(int code, Datum arg)
443 : {
444 : slist_iter iter;
445 : long cc_searches = 0;
446 : long cc_hits = 0;
447 : long cc_neg_hits = 0;
448 : long cc_newloads = 0;
449 : long cc_invals = 0;
450 : long cc_nlists = 0;
451 : long cc_lsearches = 0;
452 : long cc_lhits = 0;
453 :
454 : slist_foreach(iter, &CacheHdr->ch_caches)
455 : {
456 : CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
457 :
458 : if (cache->cc_ntup == 0 && cache->cc_searches == 0)
459 : continue; /* don't print unused caches */
460 : elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %d lists, %ld lsrch, %ld lhits",
461 : cache->cc_relname,
462 : cache->cc_indexoid,
463 : cache->cc_ntup,
464 : cache->cc_searches,
465 : cache->cc_hits,
466 : cache->cc_neg_hits,
467 : cache->cc_hits + cache->cc_neg_hits,
468 : cache->cc_newloads,
469 : cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
470 : cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
471 : cache->cc_invals,
472 : cache->cc_nlist,
473 : cache->cc_lsearches,
474 : cache->cc_lhits);
475 : cc_searches += cache->cc_searches;
476 : cc_hits += cache->cc_hits;
477 : cc_neg_hits += cache->cc_neg_hits;
478 : cc_newloads += cache->cc_newloads;
479 : cc_invals += cache->cc_invals;
480 : cc_nlists += cache->cc_nlist;
481 : cc_lsearches += cache->cc_lsearches;
482 : cc_lhits += cache->cc_lhits;
483 : }
484 : elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lists, %ld lsrch, %ld lhits",
485 : CacheHdr->ch_ntup,
486 : cc_searches,
487 : cc_hits,
488 : cc_neg_hits,
489 : cc_hits + cc_neg_hits,
490 : cc_newloads,
491 : cc_searches - cc_hits - cc_neg_hits - cc_newloads,
492 : cc_searches - cc_hits - cc_neg_hits,
493 : cc_invals,
494 : cc_nlists,
495 : cc_lsearches,
496 : cc_lhits);
497 : }
498 : #endif /* CATCACHE_STATS */
499 :
500 :
501 : /*
502 : * CatCacheRemoveCTup
503 : *
504 : * Unlink and delete the given cache entry
505 : *
506 : * NB: if it is a member of a CatCList, the CatCList is deleted too.
507 : * Both the cache entry and the list had better have zero refcount.
508 : */
509 : static void
510 1382940 : CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
511 : {
512 : Assert(ct->refcount == 0);
513 : Assert(ct->my_cache == cache);
514 :
515 1382940 : if (ct->c_list)
516 : {
517 : /*
518 : * The cleanest way to handle this is to call CatCacheRemoveCList,
519 : * which will recurse back to me, and the recursive call will do the
520 : * work. Set the "dead" flag to make sure it does recurse.
521 : */
522 0 : ct->dead = true;
523 0 : CatCacheRemoveCList(cache, ct->c_list);
524 0 : return; /* nothing left to do */
525 : }
526 :
527 : /* delink from linked list */
528 1382940 : dlist_delete(&ct->cache_elem);
529 :
530 : /*
531 : * Free keys when we're dealing with a negative entry, normal entries just
532 : * point into tuple, allocated together with the CatCTup.
533 : */
534 1382940 : if (ct->negative)
535 384036 : CatCacheFreeKeys(cache->cc_tupdesc, cache->cc_nkeys,
536 384036 : cache->cc_keyno, ct->keys);
537 :
538 1382940 : pfree(ct);
539 :
540 1382940 : --cache->cc_ntup;
541 1382940 : --CacheHdr->ch_ntup;
542 : }
543 :
544 : /*
545 : * CatCacheRemoveCList
546 : *
547 : * Unlink and delete the given cache list entry
548 : *
549 : * NB: any dead member entries that become unreferenced are deleted too.
550 : */
551 : static void
552 110598 : CatCacheRemoveCList(CatCache *cache, CatCList *cl)
553 : {
554 : int i;
555 :
556 : Assert(cl->refcount == 0);
557 : Assert(cl->my_cache == cache);
558 :
559 : /* delink from member tuples */
560 372118 : for (i = cl->n_members; --i >= 0;)
561 : {
562 261520 : CatCTup *ct = cl->members[i];
563 :
564 : Assert(ct->c_list == cl);
565 261520 : ct->c_list = NULL;
566 : /* if the member is dead and now has no references, remove it */
567 261520 : if (
568 : #ifndef CATCACHE_FORCE_RELEASE
569 261520 : ct->dead &&
570 : #endif
571 144 : ct->refcount == 0)
572 144 : CatCacheRemoveCTup(cache, ct);
573 : }
574 :
575 : /* delink from linked list */
576 110598 : dlist_delete(&cl->cache_elem);
577 :
578 : /* free associated column data */
579 110598 : CatCacheFreeKeys(cache->cc_tupdesc, cl->nkeys,
580 110598 : cache->cc_keyno, cl->keys);
581 :
582 110598 : pfree(cl);
583 :
584 110598 : --cache->cc_nlist;
585 110598 : }
586 :
587 :
588 : /*
589 : * CatCacheInvalidate
590 : *
591 : * Invalidate entries in the specified cache, given a hash value.
592 : *
593 : * We delete cache entries that match the hash value, whether positive
594 : * or negative. We don't care whether the invalidation is the result
595 : * of a tuple insertion or a deletion.
596 : *
597 : * We used to try to match positive cache entries by TID, but that is
598 : * unsafe after a VACUUM FULL on a system catalog: an inval event could
599 : * be queued before VACUUM FULL, and then processed afterwards, when the
600 : * target tuple that has to be invalidated has a different TID than it
601 : * did when the event was created. So now we just compare hash values and
602 : * accept the small risk of unnecessary invalidations due to false matches.
603 : *
604 : * This routine is only quasi-public: it should only be used by inval.c.
605 : */
606 : void
607 18392234 : CatCacheInvalidate(CatCache *cache, uint32 hashValue)
608 : {
609 : Index hashIndex;
610 : dlist_mutable_iter iter;
611 :
612 : CACHE_elog(DEBUG2, "CatCacheInvalidate: called");
613 :
614 : /*
615 : * We don't bother to check whether the cache has finished initialization
616 : * yet; if not, there will be no entries in it so no problem.
617 : */
618 :
619 : /*
620 : * Invalidate *all* CatCLists in this cache; it's too hard to tell which
621 : * searches might still be correct, so just zap 'em all.
622 : */
623 21748010 : for (int i = 0; i < cache->cc_nlbuckets; i++)
624 : {
625 3355776 : dlist_head *bucket = &cache->cc_lbucket[i];
626 :
627 3461416 : dlist_foreach_modify(iter, bucket)
628 : {
629 105640 : CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
630 :
631 105640 : if (cl->refcount > 0)
632 144 : cl->dead = true;
633 : else
634 105496 : CatCacheRemoveCList(cache, cl);
635 : }
636 : }
637 :
638 : /*
639 : * inspect the proper hash bucket for tuple matches
640 : */
641 18392234 : hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
642 24842148 : dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
643 : {
644 6449914 : CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
645 :
646 6449914 : if (hashValue == ct->hash_value)
647 : {
648 1190464 : if (ct->refcount > 0 ||
649 1189338 : (ct->c_list && ct->c_list->refcount > 0))
650 : {
651 1270 : ct->dead = true;
652 : /* list, if any, was marked dead above */
653 1270 : Assert(ct->c_list == NULL || ct->c_list->dead);
654 : }
655 : else
656 1189194 : CatCacheRemoveCTup(cache, ct);
657 : CACHE_elog(DEBUG2, "CatCacheInvalidate: invalidated");
658 : #ifdef CATCACHE_STATS
659 : cache->cc_invals++;
660 : #endif
661 : /* could be multiple matches, so keep looking! */
662 : }
663 : }
664 18392234 : }
665 :
666 : /* ----------------------------------------------------------------
667 : * public functions
668 : * ----------------------------------------------------------------
669 : */
670 :
671 :
672 : /*
673 : * Standard routine for creating cache context if it doesn't exist yet
674 : *
675 : * There are a lot of places (probably far more than necessary) that check
676 : * whether CacheMemoryContext exists yet and want to create it if not.
677 : * We centralize knowledge of exactly how to create it here.
678 : */
679 : void
680 26074 : CreateCacheMemoryContext(void)
681 : {
682 : /*
683 : * Purely for paranoia, check that context doesn't exist; caller probably
684 : * did so already.
685 : */
686 26074 : if (!CacheMemoryContext)
687 26074 : CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
688 : "CacheMemoryContext",
689 : ALLOCSET_DEFAULT_SIZES);
690 26074 : }
691 :
692 :
693 : /*
694 : * ResetCatalogCache
695 : *
696 : * Reset one catalog cache to empty.
697 : *
698 : * This is not very efficient if the target cache is nearly empty.
699 : * However, it shouldn't need to be efficient; we don't invoke it often.
700 : */
701 : static void
702 334298 : ResetCatalogCache(CatCache *cache)
703 : {
704 : dlist_mutable_iter iter;
705 : int i;
706 :
707 : /* Remove each list in this cache, or at least mark it dead */
708 376410 : for (i = 0; i < cache->cc_nlbuckets; i++)
709 : {
710 42112 : dlist_head *bucket = &cache->cc_lbucket[i];
711 :
712 47208 : dlist_foreach_modify(iter, bucket)
713 : {
714 5096 : CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
715 :
716 5096 : if (cl->refcount > 0)
717 0 : cl->dead = true;
718 : else
719 5096 : CatCacheRemoveCList(cache, cl);
720 : }
721 : }
722 :
723 : /* Remove each tuple in this cache, or at least mark it dead */
724 10202926 : for (i = 0; i < cache->cc_nbuckets; i++)
725 : {
726 9868628 : dlist_head *bucket = &cache->cc_bucket[i];
727 :
728 10061110 : dlist_foreach_modify(iter, bucket)
729 : {
730 192482 : CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
731 :
732 192482 : if (ct->refcount > 0 ||
733 192478 : (ct->c_list && ct->c_list->refcount > 0))
734 : {
735 4 : ct->dead = true;
736 : /* list, if any, was marked dead above */
737 4 : Assert(ct->c_list == NULL || ct->c_list->dead);
738 : }
739 : else
740 192478 : CatCacheRemoveCTup(cache, ct);
741 : #ifdef CATCACHE_STATS
742 : cache->cc_invals++;
743 : #endif
744 : }
745 : }
746 334298 : }
747 :
748 : /*
749 : * ResetCatalogCaches
750 : *
751 : * Reset all caches when a shared cache inval event forces it
752 : */
753 : void
754 4018 : ResetCatalogCaches(void)
755 : {
756 : slist_iter iter;
757 :
758 : CACHE_elog(DEBUG2, "ResetCatalogCaches called");
759 :
760 337512 : slist_foreach(iter, &CacheHdr->ch_caches)
761 : {
762 333494 : CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
763 :
764 333494 : ResetCatalogCache(cache);
765 : }
766 :
767 : CACHE_elog(DEBUG2, "end of ResetCatalogCaches call");
768 4018 : }
769 :
770 : /*
771 : * CatalogCacheFlushCatalog
772 : *
773 : * Flush all catcache entries that came from the specified system catalog.
774 : * This is needed after VACUUM FULL/CLUSTER on the catalog, since the
775 : * tuples very likely now have different TIDs than before. (At one point
776 : * we also tried to force re-execution of CatalogCacheInitializeCache for
777 : * the cache(s) on that catalog. This is a bad idea since it leads to all
778 : * kinds of trouble if a cache flush occurs while loading cache entries.
779 : * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
780 : * rather than relying on the relcache to keep a tupdesc for us. Of course
781 : * this assumes the tupdesc of a cachable system table will not change...)
782 : */
783 : void
784 648 : CatalogCacheFlushCatalog(Oid catId)
785 : {
786 : slist_iter iter;
787 :
788 : CACHE_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
789 :
790 54432 : slist_foreach(iter, &CacheHdr->ch_caches)
791 : {
792 53784 : CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
793 :
794 : /* Does this cache store tuples of the target catalog? */
795 53784 : if (cache->cc_reloid == catId)
796 : {
797 : /* Yes, so flush all its contents */
798 804 : ResetCatalogCache(cache);
799 :
800 : /* Tell inval.c to call syscache callbacks for this cache */
801 804 : CallSyscacheCallbacks(cache->id, 0);
802 : }
803 : }
804 :
805 : CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
806 648 : }
807 :
808 : /*
809 : * InitCatCache
810 : *
811 : * This allocates and initializes a cache for a system catalog relation.
812 : * Actually, the cache is only partially initialized to avoid opening the
813 : * relation. The relation will be opened and the rest of the cache
814 : * structure initialized on the first access.
815 : */
816 : #ifdef CACHEDEBUG
817 : #define InitCatCache_DEBUG2 \
818 : do { \
819 : elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
820 : cp->cc_reloid, cp->cc_indexoid, cp->id, \
821 : cp->cc_nkeys, cp->cc_nbuckets); \
822 : } while(0)
823 : #else
824 : #define InitCatCache_DEBUG2
825 : #endif
826 :
827 : CatCache *
828 2164142 : InitCatCache(int id,
829 : Oid reloid,
830 : Oid indexoid,
831 : int nkeys,
832 : const int *key,
833 : int nbuckets)
834 : {
835 : CatCache *cp;
836 : MemoryContext oldcxt;
837 : int i;
838 :
839 : /*
840 : * nbuckets is the initial number of hash buckets to use in this catcache.
841 : * It will be enlarged later if it becomes too full.
842 : *
843 : * nbuckets must be a power of two. We check this via Assert rather than
844 : * a full runtime check because the values will be coming from constant
845 : * tables.
846 : *
847 : * If you're confused by the power-of-two check, see comments in
848 : * bitmapset.c for an explanation.
849 : */
850 : Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
851 :
852 : /*
853 : * first switch to the cache context so our allocations do not vanish at
854 : * the end of a transaction
855 : */
856 2164142 : if (!CacheMemoryContext)
857 0 : CreateCacheMemoryContext();
858 :
859 2164142 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
860 :
861 : /*
862 : * if first time through, initialize the cache group header
863 : */
864 2164142 : if (CacheHdr == NULL)
865 : {
866 26074 : CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
867 26074 : slist_init(&CacheHdr->ch_caches);
868 26074 : CacheHdr->ch_ntup = 0;
869 : #ifdef CATCACHE_STATS
870 : /* set up to dump stats at backend exit */
871 : on_proc_exit(CatCachePrintStats, 0);
872 : #endif
873 : }
874 :
875 : /*
876 : * Allocate a new cache structure, aligning to a cacheline boundary
877 : *
878 : * Note: we rely on zeroing to initialize all the dlist headers correctly
879 : */
880 2164142 : cp = (CatCache *) palloc_aligned(sizeof(CatCache), PG_CACHE_LINE_SIZE,
881 : MCXT_ALLOC_ZERO);
882 2164142 : cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));
883 :
884 : /*
885 : * Many catcaches never receive any list searches. Therefore, we don't
886 : * allocate the cc_lbuckets till we get a list search.
887 : */
888 2164142 : cp->cc_lbucket = NULL;
889 :
890 : /*
891 : * initialize the cache's relation information for the relation
892 : * corresponding to this cache, and initialize some of the new cache's
893 : * other internal fields. But don't open the relation yet.
894 : */
895 2164142 : cp->id = id;
896 2164142 : cp->cc_relname = "(not known yet)";
897 2164142 : cp->cc_reloid = reloid;
898 2164142 : cp->cc_indexoid = indexoid;
899 2164142 : cp->cc_relisshared = false; /* temporary */
900 2164142 : cp->cc_tupdesc = (TupleDesc) NULL;
901 2164142 : cp->cc_ntup = 0;
902 2164142 : cp->cc_nlist = 0;
903 2164142 : cp->cc_nbuckets = nbuckets;
904 2164142 : cp->cc_nlbuckets = 0;
905 2164142 : cp->cc_nkeys = nkeys;
906 5684132 : for (i = 0; i < nkeys; ++i)
907 : {
908 : Assert(AttributeNumberIsValid(key[i]));
909 3519990 : cp->cc_keyno[i] = key[i];
910 : }
911 :
912 : /*
913 : * new cache is initialized as far as we can go for now. print some
914 : * debugging information, if appropriate.
915 : */
916 : InitCatCache_DEBUG2;
917 :
918 : /*
919 : * add completed cache to top of group header's list
920 : */
921 2164142 : slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);
922 :
923 : /*
924 : * back to the old context before we return...
925 : */
926 2164142 : MemoryContextSwitchTo(oldcxt);
927 :
928 2164142 : return cp;
929 : }
930 :
931 : /*
932 : * Enlarge a catcache, doubling the number of buckets.
933 : */
934 : static void
935 4940 : RehashCatCache(CatCache *cp)
936 : {
937 : dlist_head *newbucket;
938 : int newnbuckets;
939 : int i;
940 :
941 4940 : elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
942 : cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);
943 :
944 : /* Allocate a new, larger, hash table. */
945 4940 : newnbuckets = cp->cc_nbuckets * 2;
946 4940 : newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
947 :
948 : /* Move all entries from old hash table to new. */
949 422636 : for (i = 0; i < cp->cc_nbuckets; i++)
950 : {
951 : dlist_mutable_iter iter;
952 :
953 1258028 : dlist_foreach_modify(iter, &cp->cc_bucket[i])
954 : {
955 840332 : CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
956 840332 : int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
957 :
958 840332 : dlist_delete(iter.cur);
959 840332 : dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
960 : }
961 : }
962 :
963 : /* Switch to the new array. */
964 4940 : pfree(cp->cc_bucket);
965 4940 : cp->cc_nbuckets = newnbuckets;
966 4940 : cp->cc_bucket = newbucket;
967 4940 : }
968 :
969 : /*
970 : * Enlarge a catcache's list storage, doubling the number of buckets.
971 : */
972 : static void
973 1048 : RehashCatCacheLists(CatCache *cp)
974 : {
975 : dlist_head *newbucket;
976 : int newnbuckets;
977 : int i;
978 :
979 1048 : elog(DEBUG1, "rehashing catalog cache id %d for %s; %d lists, %d buckets",
980 : cp->id, cp->cc_relname, cp->cc_nlist, cp->cc_nlbuckets);
981 :
982 : /* Allocate a new, larger, hash table. */
983 1048 : newnbuckets = cp->cc_nlbuckets * 2;
984 1048 : newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
985 :
986 : /* Move all entries from old hash table to new. */
987 39640 : for (i = 0; i < cp->cc_nlbuckets; i++)
988 : {
989 : dlist_mutable_iter iter;
990 :
991 116824 : dlist_foreach_modify(iter, &cp->cc_lbucket[i])
992 : {
993 78232 : CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
994 78232 : int hashIndex = HASH_INDEX(cl->hash_value, newnbuckets);
995 :
996 78232 : dlist_delete(iter.cur);
997 78232 : dlist_push_head(&newbucket[hashIndex], &cl->cache_elem);
998 : }
999 : }
1000 :
1001 : /* Switch to the new array. */
1002 1048 : pfree(cp->cc_lbucket);
1003 1048 : cp->cc_nlbuckets = newnbuckets;
1004 1048 : cp->cc_lbucket = newbucket;
1005 1048 : }
1006 :
1007 : /*
1008 : * CatalogCacheInitializeCache
1009 : *
1010 : * This function does final initialization of a catcache: obtain the tuple
1011 : * descriptor and set up the hash and equality function links. We assume
1012 : * that the relcache entry can be opened at this point!
1013 : */
1014 : #ifdef CACHEDEBUG
1015 : #define CatalogCacheInitializeCache_DEBUG1 \
1016 : elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
1017 : cache->cc_reloid)
1018 :
1019 : #define CatalogCacheInitializeCache_DEBUG2 \
1020 : do { \
1021 : if (cache->cc_keyno[i] > 0) { \
1022 : elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
1023 : i+1, cache->cc_nkeys, cache->cc_keyno[i], \
1024 : TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \
1025 : } else { \
1026 : elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
1027 : i+1, cache->cc_nkeys, cache->cc_keyno[i]); \
1028 : } \
1029 : } while(0)
1030 : #else
1031 : #define CatalogCacheInitializeCache_DEBUG1
1032 : #define CatalogCacheInitializeCache_DEBUG2
1033 : #endif
1034 :
static void
CatalogCacheInitializeCache(CatCache *cache)
{
	Relation	relation;
	MemoryContext oldcxt;
	TupleDesc	tupdesc;
	int			i;

	CatalogCacheInitializeCache_DEBUG1;

	/* Open the catalog relation (header comment: assumed openable here) */
	relation = table_open(cache->cc_reloid, AccessShareLock);

	/*
	 * switch to the cache context so our allocations do not vanish at the end
	 * of a transaction
	 */
	Assert(CacheMemoryContext != NULL);

	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/*
	 * copy the relcache's tuple descriptor to permanent cache storage
	 */
	tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));

	/*
	 * save the relation's name and relisshared flag, too (cc_relname is used
	 * only for debugging purposes)
	 */
	cache->cc_relname = pstrdup(RelationGetRelationName(relation));
	cache->cc_relisshared = RelationGetForm(relation)->relisshared;

	/*
	 * return to the caller's memory context and close the rel
	 */
	MemoryContextSwitchTo(oldcxt);

	table_close(relation, AccessShareLock);

	CACHE_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
			   cache->cc_relname, cache->cc_nkeys);

	/*
	 * initialize cache's key information: per key column, look up the hash
	 * and equality support functions and fill in a scan key template
	 */
	for (i = 0; i < cache->cc_nkeys; ++i)
	{
		Oid			keytype;
		RegProcedure eqfunc;

		CatalogCacheInitializeCache_DEBUG2;

		if (cache->cc_keyno[i] > 0)
		{
			/* ordinary user attribute: take the type from the tupdesc */
			Form_pg_attribute attr = TupleDescAttr(tupdesc,
												   cache->cc_keyno[i] - 1);

			keytype = attr->atttypid;
			/* cache key columns should always be NOT NULL */
			Assert(attr->attnotnull);
		}
		else
		{
			/* keyno 0 means the row OID; negative (system) attrs unsupported */
			if (cache->cc_keyno[i] < 0)
				elog(FATAL, "sys attributes are not supported in caches");
			keytype = OIDOID;
		}

		GetCCHashEqFuncs(keytype,
						 &cache->cc_hashfunc[i],
						 &eqfunc,
						 &cache->cc_fastequal[i]);

		/*
		 * Do equality-function lookup (we assume this won't need a catalog
		 * lookup for any supported type)
		 */
		fmgr_info_cxt(eqfunc,
					  &cache->cc_skey[i].sk_func,
					  CacheMemoryContext);

		/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
		cache->cc_skey[i].sk_attno = cache->cc_keyno[i];

		/* Fill in sk_strategy as well --- always standard equality */
		cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
		cache->cc_skey[i].sk_subtype = InvalidOid;
		/* If a catcache key requires a collation, it must be C collation */
		cache->cc_skey[i].sk_collation = C_COLLATION_OID;

		CACHE_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
				   cache->cc_relname, i, cache);
	}

	/*
	 * mark this cache fully initialized: cc_tupdesc is assigned last because
	 * callers test "cc_tupdesc == NULL" as the not-yet-initialized flag
	 */
	cache->cc_tupdesc = tupdesc;
}
1134 :
1135 : /*
1136 : * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
1137 : *
1138 : * One reason to call this routine is to ensure that the relcache has
1139 : * created entries for all the catalogs and indexes referenced by catcaches.
1140 : * Therefore, provide an option to open the index as well as fixing the
1141 : * cache itself. An exception is the indexes on pg_am, which we don't use
1142 : * (cf. IndexScanOK).
1143 : */
void
InitCatCachePhase2(CatCache *cache, bool touch_index)
{
	/* Finish cache initialization if the first search hasn't done it yet */
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	/* The pg_am caches never use their indexes; see IndexScanOK() */
	if (touch_index &&
		cache->id != AMOID &&
		cache->id != AMNAME)
	{
		Relation	idesc;

		/*
		 * We must lock the underlying catalog before opening the index to
		 * avoid deadlock, since index_open could possibly result in reading
		 * this same catalog, and if anyone else is exclusive-locking this
		 * catalog and index they'll be doing it in that order.
		 */
		LockRelationOid(cache->cc_reloid, AccessShareLock);
		idesc = index_open(cache->cc_indexoid, AccessShareLock);

		/*
		 * While we've got the index open, let's check that it's unique (and
		 * not just deferrable-unique, thank you very much).  This is just to
		 * catch thinkos in definitions of new catcaches, so we don't worry
		 * about the pg_am indexes not getting tested.
		 */
		Assert(idesc->rd_index->indisunique &&
			   idesc->rd_index->indimmediate);

		index_close(idesc, AccessShareLock);
		UnlockRelationOid(cache->cc_reloid, AccessShareLock);
	}
}
1178 :
1179 :
1180 : /*
1181 : * IndexScanOK
1182 : *
1183 : * This function checks for tuples that will be fetched by
1184 : * IndexSupportInitialize() during relcache initialization for
1185 : * certain system indexes that support critical syscaches.
1186 : * We can't use an indexscan to fetch these, else we'll get into
1187 : * infinite recursion. A plain heap scan will work, however.
1188 : * Once we have completed relcache initialization (signaled by
1189 : * criticalRelcachesBuilt), we don't have to worry anymore.
1190 : *
1191 : * Similarly, during backend startup we have to be able to use the
1192 : * pg_authid, pg_auth_members and pg_database syscaches for
1193 : * authentication even if we don't yet have relcache entries for those
1194 : * catalogs' indexes.
1195 : */
1196 : static bool
1197 4980962 : IndexScanOK(CatCache *cache, ScanKey cur_skey)
1198 : {
1199 4980962 : switch (cache->id)
1200 : {
1201 420312 : case INDEXRELID:
1202 :
1203 : /*
1204 : * Rather than tracking exactly which indexes have to be loaded
1205 : * before we can use indexscans (which changes from time to time),
1206 : * just force all pg_index searches to be heap scans until we've
1207 : * built the critical relcaches.
1208 : */
1209 420312 : if (!criticalRelcachesBuilt)
1210 27752 : return false;
1211 392560 : break;
1212 :
1213 48342 : case AMOID:
1214 : case AMNAME:
1215 :
1216 : /*
1217 : * Always do heap scans in pg_am, because it's so small there's
1218 : * not much point in an indexscan anyway. We *must* do this when
1219 : * initially building critical relcache entries, but we might as
1220 : * well just always do it.
1221 : */
1222 48342 : return false;
1223 :
1224 90582 : case AUTHNAME:
1225 : case AUTHOID:
1226 : case AUTHMEMMEMROLE:
1227 : case DATABASEOID:
1228 :
1229 : /*
1230 : * Protect authentication lookups occurring before relcache has
1231 : * collected entries for shared indexes.
1232 : */
1233 90582 : if (!criticalSharedRelcachesBuilt)
1234 3592 : return false;
1235 86990 : break;
1236 :
1237 4421726 : default:
1238 4421726 : break;
1239 : }
1240 :
1241 : /* Normal case, allow index scan */
1242 4901276 : return true;
1243 : }
1244 :
1245 : /*
1246 : * SearchCatCache
1247 : *
1248 : * This call searches a system cache for a tuple, opening the relation
1249 : * if necessary (on the first access to a particular cache).
1250 : *
1251 : * The result is NULL if not found, or a pointer to a HeapTuple in
1252 : * the cache. The caller must not modify the tuple, and must call
1253 : * ReleaseCatCache() when done with it.
1254 : *
1255 : * The search key values should be expressed as Datums of the key columns'
1256 : * datatype(s). (Pass zeroes for any unused parameters.) As a special
1257 : * exception, the passed-in key for a NAME column can be just a C string;
1258 : * the caller need not go to the trouble of converting it to a fully
1259 : * null-padded NAME.
1260 : */
1261 : HeapTuple
1262 4705350 : SearchCatCache(CatCache *cache,
1263 : Datum v1,
1264 : Datum v2,
1265 : Datum v3,
1266 : Datum v4)
1267 : {
1268 4705350 : return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4);
1269 : }
1270 :
1271 :
1272 : /*
1273 : * SearchCatCacheN() are SearchCatCache() versions for a specific number of
1274 : * arguments. The compiler can inline the body and unroll loops, making them a
1275 : * bit faster than SearchCatCache().
1276 : */
1277 :
1278 : HeapTuple
1279 56569404 : SearchCatCache1(CatCache *cache,
1280 : Datum v1)
1281 : {
1282 56569404 : return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0);
1283 : }
1284 :
1285 :
1286 : HeapTuple
1287 4514316 : SearchCatCache2(CatCache *cache,
1288 : Datum v1, Datum v2)
1289 : {
1290 4514316 : return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0);
1291 : }
1292 :
1293 :
1294 : HeapTuple
1295 4393588 : SearchCatCache3(CatCache *cache,
1296 : Datum v1, Datum v2, Datum v3)
1297 : {
1298 4393588 : return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0);
1299 : }
1300 :
1301 :
1302 : HeapTuple
1303 3416402 : SearchCatCache4(CatCache *cache,
1304 : Datum v1, Datum v2, Datum v3, Datum v4)
1305 : {
1306 3416402 : return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
1307 : }
1308 :
1309 : /*
1310 : * Work-horse for SearchCatCache/SearchCatCacheN.
1311 : */
static inline HeapTuple
SearchCatCacheInternal(CatCache *cache,
					   int nkeys,
					   Datum v1,
					   Datum v2,
					   Datum v3,
					   Datum v4)
{
	Datum		arguments[CATCACHE_MAXKEYS];
	uint32		hashValue;
	Index		hashIndex;
	dlist_iter	iter;
	dlist_head *bucket;
	CatCTup    *ct;

	/* Make sure we're in an xact, even if this ends up being a cache hit */
	Assert(IsTransactionState());

	Assert(cache->cc_nkeys == nkeys);

	/*
	 * one-time startup overhead for each cache (cc_tupdesc doubles as the
	 * "fully initialized" flag)
	 */
	if (unlikely(cache->cc_tupdesc == NULL))
		CatalogCacheInitializeCache(cache);

#ifdef CATCACHE_STATS
	cache->cc_searches++;
#endif

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * find the hash bucket in which to look for the tuple
	 */
	hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

	/*
	 * scan the hash bucket until we find a match or exhaust our tuples
	 *
	 * Note: it's okay to use dlist_foreach here, even though we modify the
	 * dlist within the loop, because we don't continue the loop afterwards.
	 */
	bucket = &cache->cc_bucket[hashIndex];
	dlist_foreach(iter, bucket)
	{
		ct = dlist_container(CatCTup, cache_elem, iter.cur);

		if (ct->dead)
			continue;			/* ignore dead entries */

		if (ct->hash_value != hashValue)
			continue;			/* quickly skip entry if wrong hash val */

		/* full key comparison; hash collisions fall through to here */
		if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
			continue;

		/*
		 * We found a match in the cache.  Move it to the front of the list
		 * for its hashbucket, in order to speed subsequent searches.  (The
		 * most frequently accessed elements in any hashbucket will tend to be
		 * near the front of the hashbucket's list.)
		 */
		dlist_move_head(bucket, &ct->cache_elem);

		/*
		 * If it's a positive entry, bump its refcount and return it. If it's
		 * negative, we can report failure to the caller.
		 */
		if (!ct->negative)
		{
			/* pin the entry for the caller; resowner tracks the pin */
			ResourceOwnerEnlarge(CurrentResourceOwner);
			ct->refcount++;
			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);

			CACHE_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
					   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_hits++;
#endif

			return &ct->tuple;
		}
		else
		{
			/* negative entry: key known absent, no refcount is taken */
			CACHE_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
					   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_neg_hits++;
#endif

			return NULL;
		}
	}

	/* not in cache: take the noinline slow path that scans the catalog */
	return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4);
}
1416 :
1417 : /*
1418 : * Search the actual catalogs, rather than the cache.
1419 : *
1420 : * This is kept separate from SearchCatCacheInternal() to keep the fast-path
1421 : * as small as possible. To avoid that effort being undone by a helpful
1422 : * compiler, try to explicitly forbid inlining.
1423 : */
static pg_noinline HeapTuple
SearchCatCacheMiss(CatCache *cache,
				   int nkeys,
				   uint32 hashValue,
				   Index hashIndex,
				   Datum v1,
				   Datum v2,
				   Datum v3,
				   Datum v4)
{
	ScanKeyData cur_skey[CATCACHE_MAXKEYS];
	Relation	relation;
	SysScanDesc scandesc;
	HeapTuple	ntp;
	CatCTup    *ct;
	bool		stale;
	Datum		arguments[CATCACHE_MAXKEYS];

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * Tuple was not found in cache, so we have to try to retrieve it directly
	 * from the relation.  If found, we will add it to the cache; if not
	 * found, we will add a negative cache entry instead.
	 *
	 * NOTE: it is possible for recursive cache lookups to occur while reading
	 * the relation --- for example, due to shared-cache-inval messages being
	 * processed during table_open().  This is OK.  It's even possible for one
	 * of those lookups to find and enter the very same tuple we are trying to
	 * fetch here.  If that happens, we will enter a second copy of the tuple
	 * into the cache.  The first copy will never be referenced again, and
	 * will eventually age out of the cache, so there's no functional problem.
	 * This case is rare enough that it's not worth expending extra cycles to
	 * detect.
	 *
	 * Another case, which we *must* handle, is that the tuple could become
	 * outdated during CatalogCacheCreateEntry's attempt to detoast it (since
	 * AcceptInvalidationMessages can run during TOAST table access).  We do
	 * not want to return already-stale catcache entries, so we loop around
	 * and do the table scan again if that happens.
	 */
	relation = table_open(cache->cc_reloid, AccessShareLock);

	do
	{
		/*
		 * Ok, need to make a lookup in the relation, copy the scankey and
		 * fill out any per-call fields.  (We must re-do this when retrying,
		 * because systable_beginscan scribbles on the scankey.)
		 */
		memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
		cur_skey[0].sk_argument = v1;
		cur_skey[1].sk_argument = v2;
		cur_skey[2].sk_argument = v3;
		cur_skey[3].sk_argument = v4;

		/* IndexScanOK decides between index scan and plain heap scan */
		scandesc = systable_beginscan(relation,
									  cache->cc_indexoid,
									  IndexScanOK(cache, cur_skey),
									  NULL,
									  nkeys,
									  cur_skey);

		ct = NULL;
		stale = false;

		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
		{
			ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
										 hashValue, hashIndex);
			/* upon failure, we must start the scan over */
			if (ct == NULL)
			{
				stale = true;
				break;
			}
			/* immediately set the refcount to 1 */
			ResourceOwnerEnlarge(CurrentResourceOwner);
			ct->refcount++;
			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
			break;				/* assume only one match */
		}

		systable_endscan(scandesc);
	} while (stale);

	table_close(relation, AccessShareLock);

	/*
	 * If tuple was not found, we need to build a negative cache entry
	 * containing a fake tuple.  The fake tuple has the correct key columns,
	 * but nulls everywhere else.
	 *
	 * In bootstrap mode, we don't build negative entries, because the cache
	 * invalidation mechanism isn't alive and can't clear them if the tuple
	 * gets created later.  (Bootstrap doesn't do UPDATEs, so it doesn't need
	 * cache inval for that.)
	 */
	if (ct == NULL)
	{
		if (IsBootstrapProcessingMode())
			return NULL;

		/* ntp == NULL tells CatalogCacheCreateEntry to build a negative entry */
		ct = CatalogCacheCreateEntry(cache, NULL, NULL, arguments,
									 hashValue, hashIndex);

		/* Creating a negative cache entry shouldn't fail */
		Assert(ct != NULL);

		CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
				   cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
		CACHE_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
				   cache->cc_relname, hashIndex);

		/*
		 * We are not returning the negative entry to the caller, so leave its
		 * refcount zero.
		 */

		return NULL;
	}

	CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
			   cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
	CACHE_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
			   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
	cache->cc_newloads++;
#endif

	return &ct->tuple;
}
1561 :
1562 : /*
1563 : * ReleaseCatCache
1564 : *
1565 : * Decrement the reference count of a catcache entry (releasing the
1566 : * hold grabbed by a successful SearchCatCache).
1567 : *
1568 : * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1569 : * will be freed as soon as their refcount goes to zero. In combination
1570 : * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1571 : * to catch references to already-released catcache entries.
1572 : */
1573 : void
1574 68701538 : ReleaseCatCache(HeapTuple tuple)
1575 : {
1576 68701538 : ReleaseCatCacheWithOwner(tuple, CurrentResourceOwner);
1577 68701538 : }
1578 :
1579 : static void
1580 68711758 : ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner)
1581 : {
1582 68711758 : CatCTup *ct = (CatCTup *) (((char *) tuple) -
1583 : offsetof(CatCTup, tuple));
1584 :
1585 : /* Safety checks to ensure we were handed a cache entry */
1586 : Assert(ct->ct_magic == CT_MAGIC);
1587 : Assert(ct->refcount > 0);
1588 :
1589 68711758 : ct->refcount--;
1590 68711758 : if (resowner)
1591 68701538 : ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);
1592 :
1593 68711758 : if (
1594 : #ifndef CATCACHE_FORCE_RELEASE
1595 68711758 : ct->dead &&
1596 : #endif
1597 1240 : ct->refcount == 0 &&
1598 1124 : (ct->c_list == NULL || ct->c_list->refcount == 0))
1599 1124 : CatCacheRemoveCTup(ct->my_cache, ct);
1600 68711758 : }
1601 :
1602 :
1603 : /*
1604 : * GetCatCacheHashValue
1605 : *
1606 : * Compute the hash value for a given set of search keys.
1607 : *
1608 : * The reason for exposing this as part of the API is that the hash value is
1609 : * exposed in cache invalidation operations, so there are places outside the
1610 : * catcache code that need to be able to compute the hash values.
1611 : */
1612 : uint32
1613 140680 : GetCatCacheHashValue(CatCache *cache,
1614 : Datum v1,
1615 : Datum v2,
1616 : Datum v3,
1617 : Datum v4)
1618 : {
1619 : /*
1620 : * one-time startup overhead for each cache
1621 : */
1622 140680 : if (cache->cc_tupdesc == NULL)
1623 21468 : CatalogCacheInitializeCache(cache);
1624 :
1625 : /*
1626 : * calculate the hash value
1627 : */
1628 140680 : return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4);
1629 : }
1630 :
1631 :
1632 : /*
1633 : * SearchCatCacheList
1634 : *
1635 : * Generate a list of all tuples matching a partial key (that is,
1636 : * a key specifying just the first K of the cache's N key columns).
1637 : *
1638 : * It doesn't make any sense to specify all of the cache's key columns
1639 : * here: since the key is unique, there could be at most one match, so
1640 : * you ought to use SearchCatCache() instead. Hence this function takes
1641 : * one fewer Datum argument than SearchCatCache() does.
1642 : *
1643 : * The caller must not modify the list object or the pointed-to tuples,
1644 : * and must call ReleaseCatCacheList() when done with the list.
1645 : */
CatCList *
SearchCatCacheList(CatCache *cache,
				   int nkeys,
				   Datum v1,
				   Datum v2,
				   Datum v3)
{
	Datum		v4 = 0;			/* dummy last-column value */
	Datum		arguments[CATCACHE_MAXKEYS];
	uint32		lHashValue;
	Index		lHashIndex;
	dlist_iter	iter;
	dlist_head *lbucket;
	CatCList   *cl;
	CatCTup    *ct;
	List	   *volatile ctlist;
	ListCell   *ctlist_item;
	int			nmembers;
	bool		ordered;
	HeapTuple	ntp;
	MemoryContext oldcxt;
	int			i;

	/*
	 * one-time startup overhead for each cache
	 */
	if (unlikely(cache->cc_tupdesc == NULL))
		CatalogCacheInitializeCache(cache);

	/* partial key only: a full key would be a SearchCatCache() job */
	Assert(nkeys > 0 && nkeys < cache->cc_nkeys);

#ifdef CATCACHE_STATS
	cache->cc_lsearches++;
#endif

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * If we haven't previously done a list search in this cache, create the
	 * bucket header array; otherwise, consider whether it's time to enlarge
	 * it.
	 */
	if (cache->cc_lbucket == NULL)
	{
		/* Arbitrary initial size --- must be a power of 2 */
		int			nbuckets = 16;

		cache->cc_lbucket = (dlist_head *)
			MemoryContextAllocZero(CacheMemoryContext,
								   nbuckets * sizeof(dlist_head));
		/* Don't set cc_nlbuckets if we get OOM allocating cc_lbucket */
		cache->cc_nlbuckets = nbuckets;
	}
	else
	{
		/*
		 * If the hash table has become too full, enlarge the buckets array.
		 * Quite arbitrarily, we enlarge when fill factor > 2.
		 */
		if (cache->cc_nlist > cache->cc_nlbuckets * 2)
			RehashCatCacheLists(cache);
	}

	/*
	 * Find the hash bucket in which to look for the CatCList.
	 */
	lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
	lHashIndex = HASH_INDEX(lHashValue, cache->cc_nlbuckets);

	/*
	 * scan the items until we find a match or exhaust our list
	 *
	 * Note: it's okay to use dlist_foreach here, even though we modify the
	 * dlist within the loop, because we don't continue the loop afterwards.
	 */
	lbucket = &cache->cc_lbucket[lHashIndex];
	dlist_foreach(iter, lbucket)
	{
		cl = dlist_container(CatCList, cache_elem, iter.cur);

		if (cl->dead)
			continue;			/* ignore dead entries */

		if (cl->hash_value != lHashValue)
			continue;			/* quickly skip entry if wrong hash val */

		/*
		 * see if the cached list matches our key.
		 */
		if (cl->nkeys != nkeys)
			continue;

		if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments))
			continue;

		/*
		 * We found a matching list.  Move the list to the front of the list
		 * for its hashbucket, so as to speed subsequent searches.  (We do not
		 * move the members to the fronts of their hashbucket lists, however,
		 * since there's no point in that unless they are searched for
		 * individually.)
		 */
		dlist_move_head(lbucket, &cl->cache_elem);

		/* Bump the list's refcount and return it */
		ResourceOwnerEnlarge(CurrentResourceOwner);
		cl->refcount++;
		ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);

		CACHE_elog(DEBUG2, "SearchCatCacheList(%s): found list",
				   cache->cc_relname);

#ifdef CATCACHE_STATS
		cache->cc_lhits++;
#endif

		return cl;
	}

	/*
	 * List was not found in cache, so we have to build it by reading the
	 * relation.  For each matching tuple found in the relation, use an
	 * existing cache entry if possible, else build a new one.
	 *
	 * We have to bump the member refcounts temporarily to ensure they won't
	 * get dropped from the cache while loading other members. We use a PG_TRY
	 * block to ensure we can undo those refcounts if we get an error before
	 * we finish constructing the CatCList.  ctlist must be valid throughout
	 * the PG_TRY block.
	 */
	ctlist = NIL;

	PG_TRY();
	{
		ScanKeyData cur_skey[CATCACHE_MAXKEYS];
		Relation	relation;
		SysScanDesc scandesc;
		bool		stale;

		relation = table_open(cache->cc_reloid, AccessShareLock);

		/* outer loop retries the whole scan if an entry went stale mid-build */
		do
		{
			/*
			 * Ok, need to make a lookup in the relation, copy the scankey and
			 * fill out any per-call fields.  (We must re-do this when
			 * retrying, because systable_beginscan scribbles on the scankey.)
			 */
			memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
			cur_skey[0].sk_argument = v1;
			cur_skey[1].sk_argument = v2;
			cur_skey[2].sk_argument = v3;
			cur_skey[3].sk_argument = v4;

			scandesc = systable_beginscan(relation,
										  cache->cc_indexoid,
										  IndexScanOK(cache, cur_skey),
										  NULL,
										  nkeys,
										  cur_skey);

			/* The list will be ordered iff we are doing an index scan */
			ordered = (scandesc->irel != NULL);

			stale = false;

			while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
			{
				uint32		hashValue;
				Index		hashIndex;
				bool		found = false;
				dlist_head *bucket;

				/*
				 * See if there's an entry for this tuple already.
				 */
				ct = NULL;
				hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
				hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

				bucket = &cache->cc_bucket[hashIndex];
				dlist_foreach(iter, bucket)
				{
					ct = dlist_container(CatCTup, cache_elem, iter.cur);

					if (ct->dead || ct->negative)
						continue;	/* ignore dead and negative entries */

					if (ct->hash_value != hashValue)
						continue;	/* quickly skip entry if wrong hash val */

					if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
						continue;	/* not same tuple */

					/*
					 * Found a match, but can't use it if it belongs to
					 * another list already
					 */
					if (ct->c_list)
						continue;

					found = true;
					break;		/* A-OK */
				}

				if (!found)
				{
					/* We didn't find a usable entry, so make a new one */
					ct = CatalogCacheCreateEntry(cache, ntp, scandesc, NULL,
												 hashValue, hashIndex);
					/* upon failure, we must start the scan over */
					if (ct == NULL)
					{
						/*
						 * Release refcounts on any items we already had.  We
						 * dare not try to free them if they're now
						 * unreferenced, since an error while doing that would
						 * result in the PG_CATCH below doing extra refcount
						 * decrements.  Besides, we'll likely re-adopt those
						 * items in the next iteration, so it's not worth
						 * complicating matters to try to get rid of them.
						 */
						foreach(ctlist_item, ctlist)
						{
							ct = (CatCTup *) lfirst(ctlist_item);
							Assert(ct->c_list == NULL);
							Assert(ct->refcount > 0);
							ct->refcount--;
						}
						/* Reset ctlist in preparation for new try */
						ctlist = NIL;
						stale = true;
						break;
					}
				}

				/* Careful here: add entry to ctlist, then bump its refcount */
				/* This way leaves state correct if lappend runs out of memory */
				ctlist = lappend(ctlist, ct);
				ct->refcount++;
			}

			systable_endscan(scandesc);
		} while (stale);

		table_close(relation, AccessShareLock);

		/* Make sure the resource owner has room to remember this entry. */
		ResourceOwnerEnlarge(CurrentResourceOwner);

		/* Now we can build the CatCList entry. */
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
		nmembers = list_length(ctlist);
		cl = (CatCList *)
			palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));

		/* Extract key values */
		CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno,
						 arguments, cl->keys);
		MemoryContextSwitchTo(oldcxt);

		/*
		 * We are now past the last thing that could trigger an elog before we
		 * have finished building the CatCList and remembering it in the
		 * resource owner.  So it's OK to fall out of the PG_TRY, and indeed
		 * we'd better do so before we start marking the members as belonging
		 * to the list.
		 */
	}
	PG_CATCH();
	{
		/* undo the temporary member refcounts taken above */
		foreach(ctlist_item, ctlist)
		{
			ct = (CatCTup *) lfirst(ctlist_item);
			Assert(ct->c_list == NULL);
			Assert(ct->refcount > 0);
			ct->refcount--;
			if (
#ifndef CATCACHE_FORCE_RELEASE
				ct->dead &&
#endif
				ct->refcount == 0 &&
				(ct->c_list == NULL || ct->c_list->refcount == 0))
				CatCacheRemoveCTup(cache, ct);
		}

		PG_RE_THROW();
	}
	PG_END_TRY();

	cl->cl_magic = CL_MAGIC;
	cl->my_cache = cache;
	cl->refcount = 0;			/* for the moment */
	cl->dead = false;
	cl->ordered = ordered;
	cl->nkeys = nkeys;
	cl->hash_value = lHashValue;
	cl->n_members = nmembers;

	i = 0;
	foreach(ctlist_item, ctlist)
	{
		cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
		Assert(ct->c_list == NULL);
		ct->c_list = cl;
		/* release the temporary refcount on the member */
		Assert(ct->refcount > 0);
		ct->refcount--;
		/* mark list dead if any members already dead */
		if (ct->dead)
			cl->dead = true;
	}
	Assert(i == nmembers);

	/*
	 * Add the CatCList to the appropriate bucket, and count it.
	 */
	dlist_push_head(lbucket, &cl->cache_elem);

	cache->cc_nlist++;

	/* Finally, bump the list's refcount and return it */
	cl->refcount++;
	ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);

	CACHE_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
			   cache->cc_relname, nmembers);

	return cl;
}
1980 :
1981 : /*
1982 : * ReleaseCatCacheList
1983 : *
1984 : * Decrement the reference count of a catcache list.
1985 : */
1986 : void
1987 3022210 : ReleaseCatCacheList(CatCList *list)
1988 : {
1989 3022210 : ReleaseCatCacheListWithOwner(list, CurrentResourceOwner);
1990 3022210 : }
1991 :
/*
 * ReleaseCatCacheListWithOwner
 *
 * Workhorse for ReleaseCatCacheList and the ResourceOwner release callback.
 * Drops one reference to "list"; if resowner is non-NULL the reference is
 * also forgotten in the resource owner's bookkeeping (the callback path
 * passes NULL because the owner is already discarding its records).
 */
static void
ReleaseCatCacheListWithOwner(CatCList *list, ResourceOwner resowner)
{
	/* Safety checks to ensure we were handed a cache entry */
	Assert(list->cl_magic == CL_MAGIC);
	Assert(list->refcount > 0);
	list->refcount--;
	if (resowner)
		/*
		 * NOTE(review): this forgets the ref in CurrentResourceOwner rather
		 * than in "resowner"; the visible callers pass either NULL or
		 * CurrentResourceOwner, so the two coincide here — confirm if a new
		 * caller ever passes a different owner.
		 */
		ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);

	/*
	 * Normally a list is physically removed only once it is both dead and
	 * unreferenced.  With CATCACHE_FORCE_RELEASE defined, the "dead" test is
	 * compiled out so any list is freed as soon as its refcount hits zero,
	 * which helps flush out dangling-pointer bugs.
	 */
	if (
#ifndef CATCACHE_FORCE_RELEASE
		list->dead &&
#endif
		list->refcount == 0)
		CatCacheRemoveCList(list->my_cache, list);
}
2009 :
2010 :
2011 : /*
2012 : * equalTuple
2013 : * Are these tuples memcmp()-equal?
2014 : */
2015 : static bool
2016 6 : equalTuple(HeapTuple a, HeapTuple b)
2017 : {
2018 : uint32 alen;
2019 : uint32 blen;
2020 :
2021 6 : alen = a->t_len;
2022 6 : blen = b->t_len;
2023 12 : return (alen == blen &&
2024 6 : memcmp((char *) a->t_data,
2025 6 : (char *) b->t_data, blen) == 0);
2026 : }
2027 :
/*
 * CatalogCacheCreateEntry
 *		Create a new CatCTup entry, copying the given HeapTuple and other
 *		supplied data into it.  The new entry initially has refcount 0.
 *
 * To create a normal cache entry, ntp must be the HeapTuple just fetched
 * from scandesc, and "arguments" is not used.  To create a negative cache
 * entry, pass NULL for ntp and scandesc; then "arguments" is the cache
 * keys to use.  In either case, hashValue/hashIndex are the hash values
 * computed from the cache keys.
 *
 * Returns NULL if we attempt to detoast the tuple and observe that it
 * became stale.  (This cannot happen for a negative entry.)  Caller must
 * retry the tuple lookup in that case.
 *
 * The CatCTup (and, for a normal entry, its embedded tuple copy) is
 * allocated in CacheMemoryContext so it survives until explicitly removed.
 */
static CatCTup *
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, SysScanDesc scandesc,
						Datum *arguments,
						uint32 hashValue, Index hashIndex)
{
	CatCTup    *ct;
	HeapTuple	dtp;			/* flattened copy of ntp, or ntp itself */
	MemoryContext oldcxt;

	if (ntp)
	{
		int			i;

		/*
		 * The visibility recheck below essentially never fails during our
		 * regression tests, and there's no easy way to force it to fail for
		 * testing purposes. To ensure we have test coverage for the retry
		 * paths in our callers, make debug builds randomly fail about 0.1% of
		 * the times through this code path, even when there's no toasted
		 * fields.
		 */
#ifdef USE_ASSERT_CHECKING
		if (pg_prng_uint32(&pg_global_prng_state) <= (PG_UINT32_MAX / 1000))
			return NULL;
#endif

		/*
		 * If there are any out-of-line toasted fields in the tuple, expand
		 * them in-line. This saves cycles during later use of the catcache
		 * entry, and also protects us against the possibility of the toast
		 * tuples being freed before we attempt to fetch them, in case of
		 * something using a slightly stale catcache entry.
		 */
		if (HeapTupleHasExternal(ntp))
		{
			/* catalogs that receive inplace updates need the equality check */
			bool		need_cmp = IsInplaceUpdateOid(cache->cc_reloid);
			HeapTuple	before = NULL;	/* pre-detoast snapshot, if needed */
			bool		matches = true;

			if (need_cmp)
				before = heap_copytuple(ntp);
			dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);

			/*
			 * The tuple could become stale while we are doing toast table
			 * access (since AcceptInvalidationMessages can run then).
			 * equalTuple() detects staleness from inplace updates, while
			 * systable_recheck_tuple() detects staleness from normal updates.
			 *
			 * While this equalTuple() follows the usual rule of reading with
			 * a pin and no buffer lock, it warrants suspicion since an
			 * inplace update could appear at any moment. It's safe because
			 * the inplace update sends an invalidation that can't reorder
			 * before the inplace heap change. If the heap change reaches
			 * this process just after equalTuple() looks, we've not missed
			 * its inval.
			 */
			if (need_cmp)
			{
				matches = equalTuple(before, ntp);
				heap_freetuple(before);
			}
			if (!matches || !systable_recheck_tuple(scandesc, ntp))
			{
				/* stale: discard the flattened copy and make caller retry */
				heap_freetuple(dtp);
				return NULL;
			}
		}
		else
			dtp = ntp;

		/* Allocate memory for CatCTup and the cached tuple in one go */
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

		/* extra MAXIMUM_ALIGNOF leaves room to align the tuple data below */
		ct = (CatCTup *) palloc(sizeof(CatCTup) +
								MAXIMUM_ALIGNOF + dtp->t_len);
		ct->tuple.t_len = dtp->t_len;
		ct->tuple.t_self = dtp->t_self;
		ct->tuple.t_tableOid = dtp->t_tableOid;
		ct->tuple.t_data = (HeapTupleHeader)
			MAXALIGN(((char *) ct) + sizeof(CatCTup));
		/* copy tuple contents */
		memcpy((char *) ct->tuple.t_data,
			   (const char *) dtp->t_data,
			   dtp->t_len);
		MemoryContextSwitchTo(oldcxt);

		/* free the flattened copy if toast expansion made one */
		if (dtp != ntp)
			heap_freetuple(dtp);

		/* extract keys - they'll point into the tuple if not by-value */
		for (i = 0; i < cache->cc_nkeys; i++)
		{
			Datum		atp;
			bool		isnull;

			atp = heap_getattr(&ct->tuple,
							   cache->cc_keyno[i],
							   cache->cc_tupdesc,
							   &isnull);
			Assert(!isnull);
			ct->keys[i] = atp;
		}
	}
	else
	{
		/* Set up keys for a negative cache entry */
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
		ct = (CatCTup *) palloc(sizeof(CatCTup));

		/*
		 * Store keys - they'll point into separately allocated memory if not
		 * by-value.
		 */
		CatCacheCopyKeys(cache->cc_tupdesc, cache->cc_nkeys, cache->cc_keyno,
						 arguments, ct->keys);
		MemoryContextSwitchTo(oldcxt);
	}

	/*
	 * Finish initializing the CatCTup header, and add it to the cache's
	 * linked list and counts.
	 */
	ct->ct_magic = CT_MAGIC;
	ct->my_cache = cache;
	ct->c_list = NULL;
	ct->refcount = 0;			/* for the moment */
	ct->dead = false;
	ct->negative = (ntp == NULL);
	ct->hash_value = hashValue;

	dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);

	cache->cc_ntup++;
	CacheHdr->ch_ntup++;

	/*
	 * If the hash table has become too full, enlarge the buckets array. Quite
	 * arbitrarily, we enlarge when fill factor > 2.
	 */
	if (cache->cc_ntup > cache->cc_nbuckets * 2)
		RehashCatCache(cache);

	return ct;
}
2188 :
2189 : /*
2190 : * Helper routine that frees keys stored in the keys array.
2191 : */
2192 : static void
2193 494634 : CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
2194 : {
2195 : int i;
2196 :
2197 1520976 : for (i = 0; i < nkeys; i++)
2198 : {
2199 1026342 : int attnum = attnos[i];
2200 : Form_pg_attribute att;
2201 :
2202 : /* system attribute are not supported in caches */
2203 : Assert(attnum > 0);
2204 :
2205 1026342 : att = TupleDescAttr(tupdesc, attnum - 1);
2206 :
2207 1026342 : if (!att->attbyval)
2208 433326 : pfree(DatumGetPointer(keys[i]));
2209 : }
2210 494634 : }
2211 :
2212 : /*
2213 : * Helper routine that copies the keys in the srckeys array into the dstkeys
2214 : * one, guaranteeing that the datums are fully allocated in the current memory
2215 : * context.
2216 : */
2217 : static void
2218 1658920 : CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
2219 : Datum *srckeys, Datum *dstkeys)
2220 : {
2221 : int i;
2222 :
2223 : /*
2224 : * XXX: memory and lookup performance could possibly be improved by
2225 : * storing all keys in one allocation.
2226 : */
2227 :
2228 5184240 : for (i = 0; i < nkeys; i++)
2229 : {
2230 3525320 : int attnum = attnos[i];
2231 3525320 : Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
2232 3525320 : Datum src = srckeys[i];
2233 : NameData srcname;
2234 :
2235 : /*
2236 : * Must be careful in case the caller passed a C string where a NAME
2237 : * is wanted: convert the given argument to a correctly padded NAME.
2238 : * Otherwise the memcpy() done by datumCopy() could fall off the end
2239 : * of memory.
2240 : */
2241 3525320 : if (att->atttypid == NAMEOID)
2242 : {
2243 722614 : namestrcpy(&srcname, DatumGetCString(src));
2244 722614 : src = NameGetDatum(&srcname);
2245 : }
2246 :
2247 3525320 : dstkeys[i] = datumCopy(src,
2248 3525320 : att->attbyval,
2249 3525320 : att->attlen);
2250 : }
2251 1658920 : }
2252 :
2253 : /*
2254 : * PrepareToInvalidateCacheTuple()
2255 : *
2256 : * This is part of a rather subtle chain of events, so pay attention:
2257 : *
2258 : * When a tuple is inserted or deleted, it cannot be flushed from the
2259 : * catcaches immediately, for reasons explained at the top of cache/inval.c.
2260 : * Instead we have to add entry(s) for the tuple to a list of pending tuple
2261 : * invalidations that will be done at the end of the command or transaction.
2262 : *
2263 : * The lists of tuples that need to be flushed are kept by inval.c. This
2264 : * routine is a helper routine for inval.c. Given a tuple belonging to
2265 : * the specified relation, find all catcaches it could be in, compute the
2266 : * correct hash value for each such catcache, and call the specified
2267 : * function to record the cache id and hash value in inval.c's lists.
2268 : * SysCacheInvalidate will be called later, if appropriate,
2269 : * using the recorded information.
2270 : *
2271 : * For an insert or delete, tuple is the target tuple and newtuple is NULL.
2272 : * For an update, we are called just once, with tuple being the old tuple
2273 : * version and newtuple the new version. We should make two list entries
2274 : * if the tuple's hash value changed, but only one if it didn't.
2275 : *
2276 : * Note that it is irrelevant whether the given tuple is actually loaded
2277 : * into the catcache at the moment. Even if it's not there now, it might
2278 : * be by the end of the command, or there might be a matching negative entry
2279 : * to flush --- or other backends' caches might have such entries --- so
2280 : * we have to make list entries to flush it later.
2281 : *
2282 : * Also note that it's not an error if there are no catcaches for the
2283 : * specified relation. inval.c doesn't know exactly which rels have
2284 : * catcaches --- it will call this routine for any tuple that's in a
2285 : * system relation.
2286 : */
2287 : void
2288 2539968 : PrepareToInvalidateCacheTuple(Relation relation,
2289 : HeapTuple tuple,
2290 : HeapTuple newtuple,
2291 : void (*function) (int, uint32, Oid))
2292 : {
2293 : slist_iter iter;
2294 : Oid reloid;
2295 :
2296 : CACHE_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
2297 :
2298 : /*
2299 : * sanity checks
2300 : */
2301 : Assert(RelationIsValid(relation));
2302 : Assert(HeapTupleIsValid(tuple));
2303 : Assert(PointerIsValid(function));
2304 : Assert(CacheHdr != NULL);
2305 :
2306 2539968 : reloid = RelationGetRelid(relation);
2307 :
2308 : /* ----------------
2309 : * for each cache
2310 : * if the cache contains tuples from the specified relation
2311 : * compute the tuple's hash value(s) in this cache,
2312 : * and call the passed function to register the information.
2313 : * ----------------
2314 : */
2315 :
2316 213357312 : slist_foreach(iter, &CacheHdr->ch_caches)
2317 : {
2318 210817344 : CatCache *ccp = slist_container(CatCache, cc_next, iter.cur);
2319 : uint32 hashvalue;
2320 : Oid dbid;
2321 :
2322 210817344 : if (ccp->cc_reloid != reloid)
2323 206182534 : continue;
2324 :
2325 : /* Just in case cache hasn't finished initialization yet... */
2326 4634810 : if (ccp->cc_tupdesc == NULL)
2327 7764 : CatalogCacheInitializeCache(ccp);
2328 :
2329 4634810 : hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
2330 4634810 : dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
2331 :
2332 4634810 : (*function) (ccp->id, hashvalue, dbid);
2333 :
2334 4634810 : if (newtuple)
2335 : {
2336 : uint32 newhashvalue;
2337 :
2338 354706 : newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
2339 :
2340 354706 : if (newhashvalue != hashvalue)
2341 6032 : (*function) (ccp->id, newhashvalue, dbid);
2342 : }
2343 : }
2344 2539968 : }
2345 :
2346 : /* ResourceOwner callbacks */
2347 :
2348 : static void
2349 10220 : ResOwnerReleaseCatCache(Datum res)
2350 : {
2351 10220 : ReleaseCatCacheWithOwner((HeapTuple) DatumGetPointer(res), NULL);
2352 10220 : }
2353 :
2354 : static char *
2355 0 : ResOwnerPrintCatCache(Datum res)
2356 : {
2357 0 : HeapTuple tuple = (HeapTuple) DatumGetPointer(res);
2358 0 : CatCTup *ct = (CatCTup *) (((char *) tuple) -
2359 : offsetof(CatCTup, tuple));
2360 :
2361 : /* Safety check to ensure we were handed a cache entry */
2362 : Assert(ct->ct_magic == CT_MAGIC);
2363 :
2364 0 : return psprintf("cache %s (%d), tuple %u/%u has count %d",
2365 0 : ct->my_cache->cc_relname, ct->my_cache->id,
2366 0 : ItemPointerGetBlockNumber(&(tuple->t_self)),
2367 0 : ItemPointerGetOffsetNumber(&(tuple->t_self)),
2368 : ct->refcount);
2369 : }
2370 :
2371 : static void
2372 36 : ResOwnerReleaseCatCacheList(Datum res)
2373 : {
2374 36 : ReleaseCatCacheListWithOwner((CatCList *) DatumGetPointer(res), NULL);
2375 36 : }
2376 :
2377 : static char *
2378 0 : ResOwnerPrintCatCacheList(Datum res)
2379 : {
2380 0 : CatCList *list = (CatCList *) DatumGetPointer(res);
2381 :
2382 0 : return psprintf("cache %s (%d), list %p has count %d",
2383 0 : list->my_cache->cc_relname, list->my_cache->id,
2384 : list, list->refcount);
2385 : }
|