Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * catcache.c
4 : * System catalog cache for tuples matching a key.
5 : *
6 : * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/utils/cache/catcache.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : #include "postgres.h"
16 :
17 : #include "access/genam.h"
18 : #include "access/heaptoast.h"
19 : #include "access/relscan.h"
20 : #include "access/sysattr.h"
21 : #include "access/table.h"
22 : #include "access/xact.h"
23 : #include "catalog/pg_collation.h"
24 : #include "catalog/pg_operator.h"
25 : #include "catalog/pg_type.h"
26 : #include "common/hashfn.h"
27 : #include "miscadmin.h"
28 : #include "port/pg_bitutils.h"
29 : #ifdef CATCACHE_STATS
30 : #include "storage/ipc.h" /* for on_proc_exit */
31 : #endif
32 : #include "storage/lmgr.h"
33 : #include "utils/builtins.h"
34 : #include "utils/datum.h"
35 : #include "utils/fmgroids.h"
36 : #include "utils/inval.h"
37 : #include "utils/memutils.h"
38 : #include "utils/rel.h"
39 : #include "utils/resowner_private.h"
40 : #include "utils/syscache.h"
41 :
42 :
43 : /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
44 :
45 : /*
46 : * Given a hash value and the size of the hash table, find the bucket
47 : * in which the hash value belongs. Since the hash table must contain
48 : * a power-of-2 number of elements, this is a simple bitmask.
49 : */
50 : #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
51 :
52 :
53 : /*
54 : * variables, macros and other stuff
55 : */
56 :
57 : #ifdef CACHEDEBUG
58 : #define CACHE_elog(...) elog(__VA_ARGS__)
59 : #else
60 : #define CACHE_elog(...)
61 : #endif
62 :
63 : /* Cache management header --- pointer is NULL until created */
64 : static CatCacheHeader *CacheHdr = NULL;
65 :
66 : static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
67 : int nkeys,
68 : Datum v1, Datum v2,
69 : Datum v3, Datum v4);
70 :
71 : static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache,
72 : int nkeys,
73 : uint32 hashValue,
74 : Index hashIndex,
75 : Datum v1, Datum v2,
76 : Datum v3, Datum v4);
77 :
78 : static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
79 : Datum v1, Datum v2, Datum v3, Datum v4);
80 : static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys,
81 : HeapTuple tuple);
82 : static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
83 : const Datum *cachekeys,
84 : const Datum *searchkeys);
85 :
86 : #ifdef CATCACHE_STATS
87 : static void CatCachePrintStats(int code, Datum arg);
88 : #endif
89 : static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
90 : static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
91 : static void CatalogCacheInitializeCache(CatCache *cache);
92 : static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
93 : Datum *arguments,
94 : uint32 hashValue, Index hashIndex,
95 : bool negative);
96 :
97 : static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
98 : Datum *keys);
99 : static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
100 : Datum *srckeys, Datum *dstkeys);
101 :
102 :
103 : /*
104 : * internal support functions
105 : */
106 :
107 : /*
108 : * Hash and equality functions for system types that are used as cache key
109 : * fields. In some cases, we just call the regular SQL-callable functions for
110 : * the appropriate data type, but that tends to be a little slow, and the
111 : * speed of these functions is performance-critical. Therefore, for data
112 : * types that frequently occur as catcache keys, we hard-code the logic here.
113 : * Avoiding the overhead of DirectFunctionCallN(...) is a substantial win, and
114 : * in certain cases (like int4) we can adopt a faster hash algorithm as well.
115 : */
116 :
117 : static bool
118 3947324 : chareqfast(Datum a, Datum b)
119 : {
120 3947324 : return DatumGetChar(a) == DatumGetChar(b);
121 : }
122 :
123 : static uint32
124 4998472 : charhashfast(Datum datum)
125 : {
126 4998472 : return murmurhash32((int32) DatumGetChar(datum));
127 : }
128 :
129 : static bool
130 5366814 : nameeqfast(Datum a, Datum b)
131 : {
132 5366814 : char *ca = NameStr(*DatumGetName(a));
133 5366814 : char *cb = NameStr(*DatumGetName(b));
134 :
135 5366814 : return strncmp(ca, cb, NAMEDATALEN) == 0;
136 : }
137 :
138 : static uint32
139 13929006 : namehashfast(Datum datum)
140 : {
141 13929006 : char *key = NameStr(*DatumGetName(datum));
142 :
143 13929006 : return hash_any((unsigned char *) key, strlen(key));
144 : }
145 :
146 : static bool
147 6782464 : int2eqfast(Datum a, Datum b)
148 : {
149 6782464 : return DatumGetInt16(a) == DatumGetInt16(b);
150 : }
151 :
152 : static uint32
153 10798846 : int2hashfast(Datum datum)
154 : {
155 10798846 : return murmurhash32((int32) DatumGetInt16(datum));
156 : }
157 :
158 : static bool
159 112053786 : int4eqfast(Datum a, Datum b)
160 : {
161 112053786 : return DatumGetInt32(a) == DatumGetInt32(b);
162 : }
163 :
164 : static uint32
165 136324950 : int4hashfast(Datum datum)
166 : {
167 136324950 : return murmurhash32((int32) DatumGetInt32(datum));
168 : }
169 :
170 : static bool
171 170 : texteqfast(Datum a, Datum b)
172 : {
173 : /*
174 : * The use of DEFAULT_COLLATION_OID is fairly arbitrary here. We just
175 : * want to take the fast "deterministic" path in texteq().
176 : */
177 170 : return DatumGetBool(DirectFunctionCall2Coll(texteq, DEFAULT_COLLATION_OID, a, b));
178 : }
179 :
180 : static uint32
181 2952 : texthashfast(Datum datum)
182 : {
183 : /* analogously here as in texteqfast() */
184 2952 : return DatumGetInt32(DirectFunctionCall1Coll(hashtext, DEFAULT_COLLATION_OID, datum));
185 : }
186 :
187 : static bool
188 14618 : oidvectoreqfast(Datum a, Datum b)
189 : {
190 14618 : return DatumGetBool(DirectFunctionCall2(oidvectoreq, a, b));
191 : }
192 :
193 : static uint32
194 858942 : oidvectorhashfast(Datum datum)
195 : {
196 858942 : return DatumGetInt32(DirectFunctionCall1(hashoidvector, datum));
197 : }
198 :
199 : /* Lookup support functions for a type. */
200 : static void
201 849782 : GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
202 : {
203 849782 : switch (keytype)
204 : {
205 11436 : case BOOLOID:
206 11436 : *hashfunc = charhashfast;
207 11436 : *fasteqfunc = chareqfast;
208 11436 : *eqfunc = F_BOOLEQ;
209 11436 : break;
210 15480 : case CHAROID:
211 15480 : *hashfunc = charhashfast;
212 15480 : *fasteqfunc = chareqfast;
213 15480 : *eqfunc = F_CHAREQ;
214 15480 : break;
215 160840 : case NAMEOID:
216 160840 : *hashfunc = namehashfast;
217 160840 : *fasteqfunc = nameeqfast;
218 160840 : *eqfunc = F_NAMEEQ;
219 160840 : break;
220 49586 : case INT2OID:
221 49586 : *hashfunc = int2hashfast;
222 49586 : *fasteqfunc = int2eqfast;
223 49586 : *eqfunc = F_INT2EQ;
224 49586 : break;
225 12474 : case INT4OID:
226 12474 : *hashfunc = int4hashfast;
227 12474 : *fasteqfunc = int4eqfast;
228 12474 : *eqfunc = F_INT4EQ;
229 12474 : break;
230 5520 : case TEXTOID:
231 5520 : *hashfunc = texthashfast;
232 5520 : *fasteqfunc = texteqfast;
233 5520 : *eqfunc = F_TEXTEQ;
234 5520 : break;
235 583072 : case OIDOID:
236 : case REGPROCOID:
237 : case REGPROCEDUREOID:
238 : case REGOPEROID:
239 : case REGOPERATOROID:
240 : case REGCLASSOID:
241 : case REGTYPEOID:
242 : case REGCOLLATIONOID:
243 : case REGCONFIGOID:
244 : case REGDICTIONARYOID:
245 : case REGROLEOID:
246 : case REGNAMESPACEOID:
247 583072 : *hashfunc = int4hashfast;
248 583072 : *fasteqfunc = int4eqfast;
249 583072 : *eqfunc = F_OIDEQ;
250 583072 : break;
251 11374 : case OIDVECTOROID:
252 11374 : *hashfunc = oidvectorhashfast;
253 11374 : *fasteqfunc = oidvectoreqfast;
254 11374 : *eqfunc = F_OIDVECTOREQ;
255 11374 : break;
256 0 : default:
257 0 : elog(FATAL, "type %u not supported as catcache key", keytype);
258 : *hashfunc = NULL; /* keep compiler quiet */
259 :
260 : *eqfunc = InvalidOid;
261 : break;
262 : }
263 849782 : }
264 :
/*
 * CatalogCacheComputeHashValue
 *
 * Compute the hash value associated with a given set of lookup keys.
 *
 * Each key's hash is computed by the per-column fast hash function and is
 * rotated by a column-specific amount (24/16/8/0 bits) before being XOR'd
 * into the result, so that equal values appearing in different key columns
 * do not cancel each other out.
 */
static uint32
CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
							 Datum v1, Datum v2, Datum v3, Datum v4)
{
	uint32		hashValue = 0;
	uint32		oneHash;
	CCHashFN   *cc_hashfunc = cache->cc_hashfunc;

	CACHE_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
			   cache->cc_relname, nkeys, cache);

	/* Cases deliberately fall through: keys are folded in last-to-first. */
	switch (nkeys)
	{
		case 4:
			oneHash = (cc_hashfunc[3]) (v4);
			hashValue ^= pg_rotate_left32(oneHash, 24);
			/* FALLTHROUGH */
		case 3:
			oneHash = (cc_hashfunc[2]) (v3);
			hashValue ^= pg_rotate_left32(oneHash, 16);
			/* FALLTHROUGH */
		case 2:
			oneHash = (cc_hashfunc[1]) (v2);
			hashValue ^= pg_rotate_left32(oneHash, 8);
			/* FALLTHROUGH */
		case 1:
			oneHash = (cc_hashfunc[0]) (v1);
			hashValue ^= oneHash;
			break;
		default:
			elog(FATAL, "wrong number of hash keys: %d", nkeys);
			break;
	}

	return hashValue;
}
306 :
/*
 * CatalogCacheComputeTupleHashValue
 *
 * Compute the hash value associated with a given tuple to be cached.
 *
 * Extracts the key columns named by cache->cc_keyno from the tuple and
 * delegates to CatalogCacheComputeHashValue.  Key columns are expected to
 * be NOT NULL (only Assert-checked; isNull is shared across extractions).
 */
static uint32
CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple)
{
	Datum		v1 = 0,
				v2 = 0,
				v3 = 0,
				v4 = 0;
	bool		isNull = false;
	int		   *cc_keyno = cache->cc_keyno;
	TupleDesc	cc_tupdesc = cache->cc_tupdesc;

	/* Now extract key fields from tuple, insert into scankey */
	/* Cases deliberately fall through, fetching keys last-to-first. */
	switch (nkeys)
	{
		case 4:
			v4 = fastgetattr(tuple,
							 cc_keyno[3],
							 cc_tupdesc,
							 &isNull);
			Assert(!isNull);
			/* FALLTHROUGH */
		case 3:
			v3 = fastgetattr(tuple,
							 cc_keyno[2],
							 cc_tupdesc,
							 &isNull);
			Assert(!isNull);
			/* FALLTHROUGH */
		case 2:
			v2 = fastgetattr(tuple,
							 cc_keyno[1],
							 cc_tupdesc,
							 &isNull);
			Assert(!isNull);
			/* FALLTHROUGH */
		case 1:
			v1 = fastgetattr(tuple,
							 cc_keyno[0],
							 cc_tupdesc,
							 &isNull);
			Assert(!isNull);
			break;
		default:
			elog(FATAL, "wrong number of hash keys: %d", nkeys);
			break;
	}

	return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
}
361 :
362 : /*
363 : * CatalogCacheCompareTuple
364 : *
365 : * Compare a tuple to the passed arguments.
366 : */
367 : static inline bool
368 99860184 : CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
369 : const Datum *cachekeys,
370 : const Datum *searchkeys)
371 : {
372 99860184 : const CCFastEqualFN *cc_fastequal = cache->cc_fastequal;
373 : int i;
374 :
375 228025360 : for (i = 0; i < nkeys; i++)
376 : {
377 128165176 : if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i]))
378 0 : return false;
379 : }
380 99860184 : return true;
381 : }
382 :
383 :
#ifdef CATCACHE_STATS

/*
 * CatCachePrintStats
 *
 * on_proc_exit callback (see InitCatCache): dump per-cache and aggregate
 * catcache hit/miss statistics via elog(DEBUG2) at backend exit.  The
 * "code" and "arg" parameters are required by the on_proc_exit signature
 * but unused here.
 */
static void
CatCachePrintStats(int code, Datum arg)
{
	slist_iter	iter;
	long		cc_searches = 0;
	long		cc_hits = 0;
	long		cc_neg_hits = 0;
	long		cc_newloads = 0;
	long		cc_invals = 0;
	long		cc_lsearches = 0;
	long		cc_lhits = 0;

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		if (cache->cc_ntup == 0 && cache->cc_searches == 0)
			continue;			/* don't print unused caches */
		elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
			 cache->cc_relname,
			 cache->cc_indexoid,
			 cache->cc_ntup,
			 cache->cc_searches,
			 cache->cc_hits,
			 cache->cc_neg_hits,
			 cache->cc_hits + cache->cc_neg_hits,
			 cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
			 cache->cc_invals,
			 cache->cc_lsearches,
			 cache->cc_lhits);
		/* accumulate the grand totals printed below */
		cc_searches += cache->cc_searches;
		cc_hits += cache->cc_hits;
		cc_neg_hits += cache->cc_neg_hits;
		cc_newloads += cache->cc_newloads;
		cc_invals += cache->cc_invals;
		cc_lsearches += cache->cc_lsearches;
		cc_lhits += cache->cc_lhits;
	}
	elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lsrch, %ld lhits",
		 CacheHdr->ch_ntup,
		 cc_searches,
		 cc_hits,
		 cc_neg_hits,
		 cc_hits + cc_neg_hits,
		 cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits,
		 cc_invals,
		 cc_lsearches,
		 cc_lhits);
}
#endif							/* CATCACHE_STATS */
440 :
441 :
/*
 * CatCacheRemoveCTup
 *
 * Unlink and delete the given cache entry
 *
 * NB: if it is a member of a CatCList, the CatCList is deleted too.
 * Both the cache entry and the list had better have zero refcount.
 */
static void
CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
{
	Assert(ct->refcount == 0);
	Assert(ct->my_cache == cache);

	if (ct->c_list)
	{
		/*
		 * The cleanest way to handle this is to call CatCacheRemoveCList,
		 * which will recurse back to me, and the recursive call will do the
		 * work.  Set the "dead" flag to make sure it does recurse.
		 */
		ct->dead = true;
		CatCacheRemoveCList(cache, ct->c_list);
		return;					/* nothing left to do */
	}

	/* delink from linked list */
	dlist_delete(&ct->cache_elem);

	/*
	 * Free keys when we're dealing with a negative entry, normal entries just
	 * point into tuple, allocated together with the CatCTup.
	 */
	if (ct->negative)
		CatCacheFreeKeys(cache->cc_tupdesc, cache->cc_nkeys,
						 cache->cc_keyno, ct->keys);

	pfree(ct);

	/* keep the per-cache and global tuple counters in sync */
	--cache->cc_ntup;
	--CacheHdr->ch_ntup;
}
484 :
/*
 * CatCacheRemoveCList
 *
 * Unlink and delete the given cache list entry
 *
 * NB: any dead member entries that become unreferenced are deleted too.
 */
static void
CatCacheRemoveCList(CatCache *cache, CatCList *cl)
{
	int			i;

	Assert(cl->refcount == 0);
	Assert(cl->my_cache == cache);

	/* delink from member tuples */
	for (i = cl->n_members; --i >= 0;)
	{
		CatCTup    *ct = cl->members[i];

		Assert(ct->c_list == cl);
		ct->c_list = NULL;
		/* if the member is dead and now has no references, remove it */
		/* (with CATCACHE_FORCE_RELEASE, remove any unreferenced member) */
		if (
#ifndef CATCACHE_FORCE_RELEASE
			ct->dead &&
#endif
			ct->refcount == 0)
			CatCacheRemoveCTup(cache, ct);
	}

	/* delink from linked list */
	dlist_delete(&cl->cache_elem);

	/* free associated column data */
	CatCacheFreeKeys(cache->cc_tupdesc, cl->nkeys,
					 cache->cc_keyno, cl->keys);

	pfree(cl);
}
525 :
526 :
/*
 * CatCacheInvalidate
 *
 * Invalidate entries in the specified cache, given a hash value.
 *
 * We delete cache entries that match the hash value, whether positive
 * or negative.  We don't care whether the invalidation is the result
 * of a tuple insertion or a deletion.
 *
 * We used to try to match positive cache entries by TID, but that is
 * unsafe after a VACUUM FULL on a system catalog: an inval event could
 * be queued before VACUUM FULL, and then processed afterwards, when the
 * target tuple that has to be invalidated has a different TID than it
 * did when the event was created.  So now we just compare hash values and
 * accept the small risk of unnecessary invalidations due to false matches.
 *
 * This routine is only quasi-public: it should only be used by inval.c.
 */
void
CatCacheInvalidate(CatCache *cache, uint32 hashValue)
{
	Index		hashIndex;
	dlist_mutable_iter iter;

	CACHE_elog(DEBUG2, "CatCacheInvalidate: called");

	/*
	 * We don't bother to check whether the cache has finished initialization
	 * yet; if not, there will be no entries in it so no problem.
	 */

	/*
	 * Invalidate *all* CatCLists in this cache; it's too hard to tell which
	 * searches might still be correct, so just zap 'em all.
	 */
	dlist_foreach_modify(iter, &cache->cc_lists)
	{
		CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

		/* in-use lists can't be freed now; flag them for later removal */
		if (cl->refcount > 0)
			cl->dead = true;
		else
			CatCacheRemoveCList(cache, cl);
	}

	/*
	 * inspect the proper hash bucket for tuple matches
	 */
	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
	dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
	{
		CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

		if (hashValue == ct->hash_value)
		{
			if (ct->refcount > 0 ||
				(ct->c_list && ct->c_list->refcount > 0))
			{
				ct->dead = true;
				/* list, if any, was marked dead above */
				Assert(ct->c_list == NULL || ct->c_list->dead);
			}
			else
				CatCacheRemoveCTup(cache, ct);
			CACHE_elog(DEBUG2, "CatCacheInvalidate: invalidated");
#ifdef CATCACHE_STATS
			cache->cc_invals++;
#endif
			/* could be multiple matches, so keep looking! */
		}
	}
}
599 :
600 : /* ----------------------------------------------------------------
601 : * public functions
602 : * ----------------------------------------------------------------
603 : */
604 :
605 :
606 : /*
607 : * Standard routine for creating cache context if it doesn't exist yet
608 : *
609 : * There are a lot of places (probably far more than necessary) that check
610 : * whether CacheMemoryContext exists yet and want to create it if not.
611 : * We centralize knowledge of exactly how to create it here.
612 : */
613 : void
614 24066 : CreateCacheMemoryContext(void)
615 : {
616 : /*
617 : * Purely for paranoia, check that context doesn't exist; caller probably
618 : * did so already.
619 : */
620 24066 : if (!CacheMemoryContext)
621 24066 : CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
622 : "CacheMemoryContext",
623 : ALLOCSET_DEFAULT_SIZES);
624 24066 : }
625 :
626 :
/*
 * ResetCatalogCache
 *
 * Reset one catalog cache to empty.
 *
 * This is not very efficient if the target cache is nearly empty.
 * However, it shouldn't need to be efficient; we don't invoke it often.
 */
static void
ResetCatalogCache(CatCache *cache)
{
	dlist_mutable_iter iter;
	int			i;

	/* Remove each list in this cache, or at least mark it dead */
	dlist_foreach_modify(iter, &cache->cc_lists)
	{
		CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

		if (cl->refcount > 0)
			cl->dead = true;
		else
			CatCacheRemoveCList(cache, cl);
	}

	/* Remove each tuple in this cache, or at least mark it dead */
	for (i = 0; i < cache->cc_nbuckets; i++)
	{
		dlist_head *bucket = &cache->cc_bucket[i];

		dlist_foreach_modify(iter, bucket)
		{
			CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

			/* entries still pinned by someone can only be flagged dead */
			if (ct->refcount > 0 ||
				(ct->c_list && ct->c_list->refcount > 0))
			{
				ct->dead = true;
				/* list, if any, was marked dead above */
				Assert(ct->c_list == NULL || ct->c_list->dead);
			}
			else
				CatCacheRemoveCTup(cache, ct);
#ifdef CATCACHE_STATS
			cache->cc_invals++;
#endif
		}
	}
}
676 :
677 : /*
678 : * ResetCatalogCaches
679 : *
680 : * Reset all caches when a shared cache inval event forces it
681 : */
682 : void
683 3880 : ResetCatalogCaches(void)
684 : {
685 : slist_iter iter;
686 :
687 : CACHE_elog(DEBUG2, "ResetCatalogCaches called");
688 :
689 325920 : slist_foreach(iter, &CacheHdr->ch_caches)
690 : {
691 322040 : CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
692 :
693 322040 : ResetCatalogCache(cache);
694 : }
695 :
696 : CACHE_elog(DEBUG2, "end of ResetCatalogCaches call");
697 3880 : }
698 :
699 : /*
700 : * CatalogCacheFlushCatalog
701 : *
702 : * Flush all catcache entries that came from the specified system catalog.
703 : * This is needed after VACUUM FULL/CLUSTER on the catalog, since the
704 : * tuples very likely now have different TIDs than before. (At one point
705 : * we also tried to force re-execution of CatalogCacheInitializeCache for
706 : * the cache(s) on that catalog. This is a bad idea since it leads to all
707 : * kinds of trouble if a cache flush occurs while loading cache entries.
708 : * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
709 : * rather than relying on the relcache to keep a tupdesc for us. Of course
710 : * this assumes the tupdesc of a cachable system table will not change...)
711 : */
712 : void
713 636 : CatalogCacheFlushCatalog(Oid catId)
714 : {
715 : slist_iter iter;
716 :
717 : CACHE_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
718 :
719 53424 : slist_foreach(iter, &CacheHdr->ch_caches)
720 : {
721 52788 : CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
722 :
723 : /* Does this cache store tuples of the target catalog? */
724 52788 : if (cache->cc_reloid == catId)
725 : {
726 : /* Yes, so flush all its contents */
727 792 : ResetCatalogCache(cache);
728 :
729 : /* Tell inval.c to call syscache callbacks for this cache */
730 792 : CallSyscacheCallbacks(cache->id, 0);
731 : }
732 : }
733 :
734 : CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
735 636 : }
736 :
737 : /*
738 : * InitCatCache
739 : *
740 : * This allocates and initializes a cache for a system catalog relation.
741 : * Actually, the cache is only partially initialized to avoid opening the
742 : * relation. The relation will be opened and the rest of the cache
743 : * structure initialized on the first access.
744 : */
745 : #ifdef CACHEDEBUG
746 : #define InitCatCache_DEBUG2 \
747 : do { \
748 : elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
749 : cp->cc_reloid, cp->cc_indexoid, cp->id, \
750 : cp->cc_nkeys, cp->cc_nbuckets); \
751 : } while(0)
752 : #else
753 : #define InitCatCache_DEBUG2
754 : #endif
755 :
CatCache *
InitCatCache(int id,
			 Oid reloid,
			 Oid indexoid,
			 int nkeys,
			 const int *key,
			 int nbuckets)
{
	CatCache   *cp;
	MemoryContext oldcxt;
	int			i;

	/*
	 * nbuckets is the initial number of hash buckets to use in this catcache.
	 * It will be enlarged later if it becomes too full.
	 *
	 * nbuckets must be a power of two.  We check this via Assert rather than
	 * a full runtime check because the values will be coming from constant
	 * tables.
	 *
	 * If you're confused by the power-of-two check, see comments in
	 * bitmapset.c for an explanation.
	 */
	Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);

	/*
	 * first switch to the cache context so our allocations do not vanish at
	 * the end of a transaction
	 */
	if (!CacheMemoryContext)
		CreateCacheMemoryContext();

	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/*
	 * if first time through, initialize the cache group header
	 */
	if (CacheHdr == NULL)
	{
		CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
		slist_init(&CacheHdr->ch_caches);
		CacheHdr->ch_ntup = 0;
#ifdef CATCACHE_STATS
		/* set up to dump stats at backend exit */
		on_proc_exit(CatCachePrintStats, 0);
#endif
	}

	/*
	 * Allocate a new cache structure, aligning to a cacheline boundary
	 *
	 * Note: we rely on zeroing to initialize all the dlist headers correctly
	 */
	cp = (CatCache *) palloc_aligned(sizeof(CatCache), PG_CACHE_LINE_SIZE,
									 MCXT_ALLOC_ZERO);
	cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));

	/*
	 * initialize the cache's relation information for the relation
	 * corresponding to this cache, and initialize some of the new cache's
	 * other internal fields.  But don't open the relation yet.  cc_relname
	 * and cc_relisshared hold placeholders until CatalogCacheInitializeCache
	 * fills them in on first use; a NULL cc_tupdesc marks the cache as not
	 * yet fully initialized.
	 */
	cp->id = id;
	cp->cc_relname = "(not known yet)";
	cp->cc_reloid = reloid;
	cp->cc_indexoid = indexoid;
	cp->cc_relisshared = false; /* temporary */
	cp->cc_tupdesc = (TupleDesc) NULL;
	cp->cc_ntup = 0;
	cp->cc_nbuckets = nbuckets;
	cp->cc_nkeys = nkeys;
	for (i = 0; i < nkeys; ++i)
		cp->cc_keyno[i] = key[i];

	/*
	 * new cache is initialized as far as we can go for now.  print some
	 * debugging information, if appropriate.
	 */
	InitCatCache_DEBUG2;

	/*
	 * add completed cache to top of group header's list
	 */
	slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);

	/*
	 * back to the old context before we return...
	 */
	MemoryContextSwitchTo(oldcxt);

	return cp;
}
848 :
849 : /*
850 : * Enlarge a catcache, doubling the number of buckets.
851 : */
852 : static void
853 19268 : RehashCatCache(CatCache *cp)
854 : {
855 : dlist_head *newbucket;
856 : int newnbuckets;
857 : int i;
858 :
859 19268 : elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
860 : cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);
861 :
862 : /* Allocate a new, larger, hash table. */
863 19268 : newnbuckets = cp->cc_nbuckets * 2;
864 19268 : newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
865 :
866 : /* Move all entries from old hash table to new. */
867 1296844 : for (i = 0; i < cp->cc_nbuckets; i++)
868 : {
869 : dlist_mutable_iter iter;
870 :
871 3851996 : dlist_foreach_modify(iter, &cp->cc_bucket[i])
872 : {
873 2574420 : CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
874 2574420 : int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
875 :
876 2574420 : dlist_delete(iter.cur);
877 2574420 : dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
878 : }
879 : }
880 :
881 : /* Switch to the new array. */
882 19268 : pfree(cp->cc_bucket);
883 19268 : cp->cc_nbuckets = newnbuckets;
884 19268 : cp->cc_bucket = newbucket;
885 19268 : }
886 :
887 : /*
888 : * CatalogCacheInitializeCache
889 : *
890 : * This function does final initialization of a catcache: obtain the tuple
891 : * descriptor and set up the hash and equality function links. We assume
892 : * that the relcache entry can be opened at this point!
893 : */
894 : #ifdef CACHEDEBUG
895 : #define CatalogCacheInitializeCache_DEBUG1 \
896 : elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
897 : cache->cc_reloid)
898 :
899 : #define CatalogCacheInitializeCache_DEBUG2 \
900 : do { \
901 : if (cache->cc_keyno[i] > 0) { \
902 : elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
903 : i+1, cache->cc_nkeys, cache->cc_keyno[i], \
904 : TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \
905 : } else { \
906 : elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
907 : i+1, cache->cc_nkeys, cache->cc_keyno[i]); \
908 : } \
909 : } while(0)
910 : #else
911 : #define CatalogCacheInitializeCache_DEBUG1
912 : #define CatalogCacheInitializeCache_DEBUG2
913 : #endif
914 :
static void
CatalogCacheInitializeCache(CatCache *cache)
{
	Relation	relation;
	MemoryContext oldcxt;
	TupleDesc	tupdesc;
	int			i;

	CatalogCacheInitializeCache_DEBUG1;

	relation = table_open(cache->cc_reloid, AccessShareLock);

	/*
	 * switch to the cache context so our allocations do not vanish at the end
	 * of a transaction
	 */
	Assert(CacheMemoryContext != NULL);

	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/*
	 * copy the relcache's tuple descriptor to permanent cache storage
	 */
	tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));

	/*
	 * save the relation's name and relisshared flag, too (cc_relname is used
	 * only for debugging purposes)
	 */
	cache->cc_relname = pstrdup(RelationGetRelationName(relation));
	cache->cc_relisshared = RelationGetForm(relation)->relisshared;

	/*
	 * return to the caller's memory context and close the rel
	 */
	MemoryContextSwitchTo(oldcxt);

	table_close(relation, AccessShareLock);

	CACHE_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
			   cache->cc_relname, cache->cc_nkeys);

	/*
	 * initialize cache's key information: per-column hash/equality function
	 * pointers plus a scankey template used for index/heap scans
	 */
	for (i = 0; i < cache->cc_nkeys; ++i)
	{
		Oid			keytype;
		RegProcedure eqfunc;

		CatalogCacheInitializeCache_DEBUG2;

		if (cache->cc_keyno[i] > 0)
		{
			Form_pg_attribute attr = TupleDescAttr(tupdesc,
												   cache->cc_keyno[i] - 1);

			keytype = attr->atttypid;
			/* cache key columns should always be NOT NULL */
			Assert(attr->attnotnull);
		}
		else
		{
			/* negative attnos would be system columns; not supported */
			if (cache->cc_keyno[i] < 0)
				elog(FATAL, "sys attributes are not supported in caches");
			keytype = OIDOID;
		}

		GetCCHashEqFuncs(keytype,
						 &cache->cc_hashfunc[i],
						 &eqfunc,
						 &cache->cc_fastequal[i]);

		/*
		 * Do equality-function lookup (we assume this won't need a catalog
		 * lookup for any supported type)
		 */
		fmgr_info_cxt(eqfunc,
					  &cache->cc_skey[i].sk_func,
					  CacheMemoryContext);

		/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
		cache->cc_skey[i].sk_attno = cache->cc_keyno[i];

		/* Fill in sk_strategy as well --- always standard equality */
		cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
		cache->cc_skey[i].sk_subtype = InvalidOid;
		/* If a catcache key requires a collation, it must be C collation */
		cache->cc_skey[i].sk_collation = C_COLLATION_OID;

		CACHE_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
				   cache->cc_relname, i, cache);
	}

	/*
	 * mark this cache fully initialized (non-NULL cc_tupdesc is the flag
	 * tested by InitCatCachePhase2 and friends)
	 */
	cache->cc_tupdesc = tupdesc;
}
1014 :
/*
 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
 *
 * One reason to call this routine is to ensure that the relcache has
 * created entries for all the catalogs and indexes referenced by catcaches.
 * Therefore, provide an option to open the index as well as fixing the
 * cache itself.  An exception is the indexes on pg_am, which we don't use
 * (cf. IndexScanOK).
 */
void
InitCatCachePhase2(CatCache *cache, bool touch_index)
{
	/* finish initialization if it hasn't been done yet */
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	if (touch_index &&
		cache->id != AMOID &&
		cache->id != AMNAME)
	{
		Relation	idesc;

		/*
		 * We must lock the underlying catalog before opening the index to
		 * avoid deadlock, since index_open could possibly result in reading
		 * this same catalog, and if anyone else is exclusive-locking this
		 * catalog and index they'll be doing it in that order.
		 */
		LockRelationOid(cache->cc_reloid, AccessShareLock);
		idesc = index_open(cache->cc_indexoid, AccessShareLock);

		/*
		 * While we've got the index open, let's check that it's unique (and
		 * not just deferrable-unique, thank you very much).  This is just to
		 * catch thinkos in definitions of new catcaches, so we don't worry
		 * about the pg_am indexes not getting tested.
		 */
		Assert(idesc->rd_index->indisunique &&
			   idesc->rd_index->indimmediate);

		index_close(idesc, AccessShareLock);
		UnlockRelationOid(cache->cc_reloid, AccessShareLock);
	}
}
1058 :
1059 :
1060 : /*
1061 : * IndexScanOK
1062 : *
1063 : * This function checks for tuples that will be fetched by
1064 : * IndexSupportInitialize() during relcache initialization for
1065 : * certain system indexes that support critical syscaches.
1066 : * We can't use an indexscan to fetch these, else we'll get into
1067 : * infinite recursion. A plain heap scan will work, however.
1068 : * Once we have completed relcache initialization (signaled by
1069 : * criticalRelcachesBuilt), we don't have to worry anymore.
1070 : *
1071 : * Similarly, during backend startup we have to be able to use the
1072 : * pg_authid, pg_auth_members and pg_database syscaches for
1073 : * authentication even if we don't yet have relcache entries for those
1074 : * catalogs' indexes.
1075 : */
1076 : static bool
1077 8759034 : IndexScanOK(CatCache *cache, ScanKey cur_skey)
1078 : {
1079 8759034 : switch (cache->id)
1080 : {
1081 566880 : case INDEXRELID:
1082 :
1083 : /*
1084 : * Rather than tracking exactly which indexes have to be loaded
1085 : * before we can use indexscans (which changes from time to time),
1086 : * just force all pg_index searches to be heap scans until we've
1087 : * built the critical relcaches.
1088 : */
1089 566880 : if (!criticalRelcachesBuilt)
1090 113032 : return false;
1091 453848 : break;
1092 :
1093 47192 : case AMOID:
1094 : case AMNAME:
1095 :
1096 : /*
1097 : * Always do heap scans in pg_am, because it's so small there's
1098 : * not much point in an indexscan anyway. We *must* do this when
1099 : * initially building critical relcache entries, but we might as
1100 : * well just always do it.
1101 : */
1102 47192 : return false;
1103 :
1104 91192 : case AUTHNAME:
1105 : case AUTHOID:
1106 : case AUTHMEMMEMROLE:
1107 : case DATABASEOID:
1108 :
1109 : /*
1110 : * Protect authentication lookups occurring before relcache has
1111 : * collected entries for shared indexes.
1112 : */
1113 91192 : if (!criticalSharedRelcachesBuilt)
1114 2954 : return false;
1115 88238 : break;
1116 :
1117 8053770 : default:
1118 8053770 : break;
1119 : }
1120 :
1121 : /* Normal case, allow index scan */
1122 8595856 : return true;
1123 : }
1124 :
1125 : /*
1126 : * SearchCatCache
1127 : *
1128 : * This call searches a system cache for a tuple, opening the relation
1129 : * if necessary (on the first access to a particular cache).
1130 : *
1131 : * The result is NULL if not found, or a pointer to a HeapTuple in
1132 : * the cache. The caller must not modify the tuple, and must call
1133 : * ReleaseCatCache() when done with it.
1134 : *
1135 : * The search key values should be expressed as Datums of the key columns'
1136 : * datatype(s). (Pass zeroes for any unused parameters.) As a special
1137 : * exception, the passed-in key for a NAME column can be just a C string;
1138 : * the caller need not go to the trouble of converting it to a fully
1139 : * null-padded NAME.
1140 : */
1141 : HeapTuple
1142 8786510 : SearchCatCache(CatCache *cache,
1143 : Datum v1,
1144 : Datum v2,
1145 : Datum v3,
1146 : Datum v4)
1147 : {
1148 8786510 : return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4);
1149 : }
1150 :
1151 :
1152 : /*
1153 : * SearchCatCacheN() are SearchCatCache() versions for a specific number of
1154 : * arguments. The compiler can inline the body and unroll loops, making them a
1155 : * bit faster than SearchCatCache().
1156 : */
1157 :
1158 : HeapTuple
1159 80343574 : SearchCatCache1(CatCache *cache,
1160 : Datum v1)
1161 : {
1162 80343574 : return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0);
1163 : }
1164 :
1165 :
1166 : HeapTuple
1167 8485506 : SearchCatCache2(CatCache *cache,
1168 : Datum v1, Datum v2)
1169 : {
1170 8485506 : return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0);
1171 : }
1172 :
1173 :
1174 : HeapTuple
1175 4197180 : SearchCatCache3(CatCache *cache,
1176 : Datum v1, Datum v2, Datum v3)
1177 : {
1178 4197180 : return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0);
1179 : }
1180 :
1181 :
1182 : HeapTuple
1183 3685946 : SearchCatCache4(CatCache *cache,
1184 : Datum v1, Datum v2, Datum v3, Datum v4)
1185 : {
1186 3685946 : return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
1187 : }
1188 :
/*
 * Work-horse for SearchCatCache/SearchCatCacheN.
 *
 * nkeys must equal cache->cc_nkeys (asserted below); the N-key wrappers
 * pass it as a constant so this inline body can be specialized per call
 * site.  On a hit, a positive entry is returned with its refcount bumped
 * and registered with the current resource owner; a negative entry yields
 * NULL without touching any refcount.
 */
static inline HeapTuple
SearchCatCacheInternal(CatCache *cache,
					   int nkeys,
					   Datum v1,
					   Datum v2,
					   Datum v3,
					   Datum v4)
{
	Datum		arguments[CATCACHE_MAXKEYS];
	uint32		hashValue;
	Index		hashIndex;
	dlist_iter	iter;
	dlist_head *bucket;
	CatCTup    *ct;

	/* Make sure we're in an xact, even if this ends up being a cache hit */
	Assert(IsTransactionState());

	Assert(cache->cc_nkeys == nkeys);

	/*
	 * one-time startup overhead for each cache
	 */
	if (unlikely(cache->cc_tupdesc == NULL))
		CatalogCacheInitializeCache(cache);

#ifdef CATCACHE_STATS
	cache->cc_searches++;
#endif

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * find the hash bucket in which to look for the tuple
	 * (HASH_INDEX masks the hash value; the table size is a power of 2)
	 */
	hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

	/*
	 * scan the hash bucket until we find a match or exhaust our tuples
	 *
	 * Note: it's okay to use dlist_foreach here, even though we modify the
	 * dlist within the loop, because we don't continue the loop afterwards.
	 */
	bucket = &cache->cc_bucket[hashIndex];
	dlist_foreach(iter, bucket)
	{
		ct = dlist_container(CatCTup, cache_elem, iter.cur);

		if (ct->dead)
			continue;			/* ignore dead entries */

		if (ct->hash_value != hashValue)
			continue;			/* quickly skip entry if wrong hash val */

		if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
			continue;

		/*
		 * We found a match in the cache.  Move it to the front of the list
		 * for its hashbucket, in order to speed subsequent searches.  (The
		 * most frequently accessed elements in any hashbucket will tend to be
		 * near the front of the hashbucket's list.)
		 */
		dlist_move_head(bucket, &ct->cache_elem);

		/*
		 * If it's a positive entry, bump its refcount and return it. If it's
		 * negative, we can report failure to the caller.
		 */
		if (!ct->negative)
		{
			/* Reserve resowner space before bumping, so we can't leak */
			ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
			ct->refcount++;
			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);

			CACHE_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
					   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_hits++;
#endif

			return &ct->tuple;
		}
		else
		{
			CACHE_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
					   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_neg_hits++;
#endif

			return NULL;
		}
	}

	/* Not in the cache: take the out-of-line slow path to the catalog */
	return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4);
}
1296 :
/*
 * Search the actual catalogs, rather than the cache.
 *
 * This is kept separate from SearchCatCacheInternal() to keep the fast-path
 * as small as possible.  To avoid that effort being undone by a helpful
 * compiler, try to explicitly forbid inlining.
 *
 * hashValue/hashIndex were already computed by the caller for these keys.
 * If the catalog has a matching tuple, a positive entry is created and
 * returned with refcount 1 (registered with the current resource owner);
 * otherwise a refcount-zero negative entry is created (except in bootstrap
 * mode) and NULL is returned.
 */
static pg_noinline HeapTuple
SearchCatCacheMiss(CatCache *cache,
				   int nkeys,
				   uint32 hashValue,
				   Index hashIndex,
				   Datum v1,
				   Datum v2,
				   Datum v3,
				   Datum v4)
{
	ScanKeyData cur_skey[CATCACHE_MAXKEYS];
	Relation	relation;
	SysScanDesc scandesc;
	HeapTuple	ntp;
	CatCTup    *ct;
	Datum		arguments[CATCACHE_MAXKEYS];

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * Ok, need to make a lookup in the relation, copy the scankey and fill
	 * out any per-call fields.
	 */
	memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
	cur_skey[0].sk_argument = v1;
	cur_skey[1].sk_argument = v2;
	cur_skey[2].sk_argument = v3;
	cur_skey[3].sk_argument = v4;

	/*
	 * Tuple was not found in cache, so we have to try to retrieve it directly
	 * from the relation.  If found, we will add it to the cache; if not
	 * found, we will add a negative cache entry instead.
	 *
	 * NOTE: it is possible for recursive cache lookups to occur while reading
	 * the relation --- for example, due to shared-cache-inval messages being
	 * processed during table_open().  This is OK.  It's even possible for one
	 * of those lookups to find and enter the very same tuple we are trying to
	 * fetch here.  If that happens, we will enter a second copy of the tuple
	 * into the cache.  The first copy will never be referenced again, and
	 * will eventually age out of the cache, so there's no functional problem.
	 * This case is rare enough that it's not worth expending extra cycles to
	 * detect.
	 */
	relation = table_open(cache->cc_reloid, AccessShareLock);

	/* IndexScanOK decides whether an index scan is safe at this point */
	scandesc = systable_beginscan(relation,
								  cache->cc_indexoid,
								  IndexScanOK(cache, cur_skey),
								  NULL,
								  nkeys,
								  cur_skey);

	ct = NULL;

	while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
	{
		ct = CatalogCacheCreateEntry(cache, ntp, arguments,
									 hashValue, hashIndex,
									 false);
		/* immediately set the refcount to 1 */
		ResourceOwnerEnlargeCatCacheRefs(CurrentResourceOwner);
		ct->refcount++;
		ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
		break;					/* assume only one match */
	}

	systable_endscan(scandesc);

	table_close(relation, AccessShareLock);

	/*
	 * If tuple was not found, we need to build a negative cache entry
	 * containing a fake tuple.  The fake tuple has the correct key columns,
	 * but nulls everywhere else.
	 *
	 * In bootstrap mode, we don't build negative entries, because the cache
	 * invalidation mechanism isn't alive and can't clear them if the tuple
	 * gets created later.  (Bootstrap doesn't do UPDATEs, so it doesn't need
	 * cache inval for that.)
	 */
	if (ct == NULL)
	{
		if (IsBootstrapProcessingMode())
			return NULL;

		ct = CatalogCacheCreateEntry(cache, NULL, arguments,
									 hashValue, hashIndex,
									 true);

		CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
				   cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
		CACHE_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
				   cache->cc_relname, hashIndex);

		/*
		 * We are not returning the negative entry to the caller, so leave its
		 * refcount zero.
		 */

		return NULL;
	}

	CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
			   cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
	CACHE_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
			   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
	cache->cc_newloads++;
#endif

	return &ct->tuple;
}
1422 :
/*
 * ReleaseCatCache
 *
 *	Decrement the reference count of a catcache entry (releasing the
 *	hold grabbed by a successful SearchCatCache).
 *
 * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
 * will be freed as soon as their refcount goes to zero.  In combination
 * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
 * to catch references to already-released catcache entries.
 */
void
ReleaseCatCache(HeapTuple tuple)
{
	/* Back up from the embedded tuple to its containing CatCTup */
	CatCTup    *ct = (CatCTup *) (((char *) tuple) -
								  offsetof(CatCTup, tuple));

	/* Safety checks to ensure we were handed a cache entry */
	Assert(ct->ct_magic == CT_MAGIC);
	Assert(ct->refcount > 0);

	ct->refcount--;
	ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);

	/*
	 * Normally only entries already marked dead are removed when their last
	 * pin goes away; with CATCACHE_FORCE_RELEASE the ct->dead test is
	 * compiled out, so every entry is removed at refcount zero (provided no
	 * CatCList still references it).
	 */
	if (
#ifndef CATCACHE_FORCE_RELEASE
		ct->dead &&
#endif
		ct->refcount == 0 &&
		(ct->c_list == NULL || ct->c_list->refcount == 0))
		CatCacheRemoveCTup(ct->my_cache, ct);
}
1455 :
1456 :
1457 : /*
1458 : * GetCatCacheHashValue
1459 : *
1460 : * Compute the hash value for a given set of search keys.
1461 : *
1462 : * The reason for exposing this as part of the API is that the hash value is
1463 : * exposed in cache invalidation operations, so there are places outside the
1464 : * catcache code that need to be able to compute the hash values.
1465 : */
1466 : uint32
1467 215024 : GetCatCacheHashValue(CatCache *cache,
1468 : Datum v1,
1469 : Datum v2,
1470 : Datum v3,
1471 : Datum v4)
1472 : {
1473 : /*
1474 : * one-time startup overhead for each cache
1475 : */
1476 215024 : if (cache->cc_tupdesc == NULL)
1477 19078 : CatalogCacheInitializeCache(cache);
1478 :
1479 : /*
1480 : * calculate the hash value
1481 : */
1482 215024 : return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4);
1483 : }
1484 :
1485 :
/*
 * SearchCatCacheList
 *
 *		Generate a list of all tuples matching a partial key (that is,
 *		a key specifying just the first K of the cache's N key columns).
 *
 * It doesn't make any sense to specify all of the cache's key columns
 * here: since the key is unique, there could be at most one match, so
 * you ought to use SearchCatCache() instead.  Hence this function takes
 * one fewer Datum argument than SearchCatCache() does.
 *
 * The caller must not modify the list object or the pointed-to tuples,
 * and must call ReleaseCatCacheList() when done with the list.
 */
CatCList *
SearchCatCacheList(CatCache *cache,
				   int nkeys,
				   Datum v1,
				   Datum v2,
				   Datum v3)
{
	Datum		v4 = 0;			/* dummy last-column value */
	Datum		arguments[CATCACHE_MAXKEYS];
	uint32		lHashValue;
	dlist_iter	iter;
	CatCList   *cl;
	CatCTup    *ct;
	/* volatile: ctlist is modified inside PG_TRY and read in PG_CATCH */
	List	   *volatile ctlist;
	ListCell   *ctlist_item;
	int			nmembers;
	bool		ordered;
	HeapTuple	ntp;
	MemoryContext oldcxt;
	int			i;

	/*
	 * one-time startup overhead for each cache
	 */
	if (cache->cc_tupdesc == NULL)
		CatalogCacheInitializeCache(cache);

	Assert(nkeys > 0 && nkeys < cache->cc_nkeys);

#ifdef CATCACHE_STATS
	cache->cc_lsearches++;
#endif

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * compute a hash value of the given keys for faster search.  We don't
	 * presently divide the CatCList items into buckets, but this still lets
	 * us skip non-matching items quickly most of the time.
	 */
	lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);

	/*
	 * scan the items until we find a match or exhaust our list
	 *
	 * Note: it's okay to use dlist_foreach here, even though we modify the
	 * dlist within the loop, because we don't continue the loop afterwards.
	 */
	dlist_foreach(iter, &cache->cc_lists)
	{
		cl = dlist_container(CatCList, cache_elem, iter.cur);

		if (cl->dead)
			continue;			/* ignore dead entries */

		if (cl->hash_value != lHashValue)
			continue;			/* quickly skip entry if wrong hash val */

		/*
		 * see if the cached list matches our key.
		 */
		if (cl->nkeys != nkeys)
			continue;

		if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments))
			continue;

		/*
		 * We found a matching list.  Move the list to the front of the
		 * cache's list-of-lists, to speed subsequent searches.  (We do not
		 * move the members to the fronts of their hashbucket lists, however,
		 * since there's no point in that unless they are searched for
		 * individually.)
		 */
		dlist_move_head(&cache->cc_lists, &cl->cache_elem);

		/* Bump the list's refcount and return it */
		ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
		cl->refcount++;
		ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);

		CACHE_elog(DEBUG2, "SearchCatCacheList(%s): found list",
				   cache->cc_relname);

#ifdef CATCACHE_STATS
		cache->cc_lhits++;
#endif

		return cl;
	}

	/*
	 * List was not found in cache, so we have to build it by reading the
	 * relation.  For each matching tuple found in the relation, use an
	 * existing cache entry if possible, else build a new one.
	 *
	 * We have to bump the member refcounts temporarily to ensure they won't
	 * get dropped from the cache while loading other members. We use a PG_TRY
	 * block to ensure we can undo those refcounts if we get an error before
	 * we finish constructing the CatCList.
	 */
	ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);

	ctlist = NIL;

	PG_TRY();
	{
		ScanKeyData cur_skey[CATCACHE_MAXKEYS];
		Relation	relation;
		SysScanDesc scandesc;

		/*
		 * Ok, need to make a lookup in the relation, copy the scankey and
		 * fill out any per-call fields.
		 */
		memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
		cur_skey[0].sk_argument = v1;
		cur_skey[1].sk_argument = v2;
		cur_skey[2].sk_argument = v3;
		cur_skey[3].sk_argument = v4;

		relation = table_open(cache->cc_reloid, AccessShareLock);

		scandesc = systable_beginscan(relation,
									  cache->cc_indexoid,
									  IndexScanOK(cache, cur_skey),
									  NULL,
									  nkeys,
									  cur_skey);

		/* The list will be ordered iff we are doing an index scan */
		ordered = (scandesc->irel != NULL);

		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
		{
			uint32		hashValue;
			Index		hashIndex;
			bool		found = false;
			dlist_head *bucket;

			/*
			 * See if there's an entry for this tuple already.
			 */
			ct = NULL;
			hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
			hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

			bucket = &cache->cc_bucket[hashIndex];
			dlist_foreach(iter, bucket)
			{
				ct = dlist_container(CatCTup, cache_elem, iter.cur);

				if (ct->dead || ct->negative)
					continue;	/* ignore dead and negative entries */

				if (ct->hash_value != hashValue)
					continue;	/* quickly skip entry if wrong hash val */

				/* compare by TID: same physical tuple, same entry */
				if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
					continue;	/* not same tuple */

				/*
				 * Found a match, but can't use it if it belongs to another
				 * list already
				 */
				if (ct->c_list)
					continue;

				found = true;
				break;			/* A-OK */
			}

			if (!found)
			{
				/* We didn't find a usable entry, so make a new one */
				ct = CatalogCacheCreateEntry(cache, ntp, arguments,
											 hashValue, hashIndex,
											 false);
			}

			/* Careful here: add entry to ctlist, then bump its refcount */
			/* This way leaves state correct if lappend runs out of memory */
			ctlist = lappend(ctlist, ct);
			ct->refcount++;
		}

		systable_endscan(scandesc);

		table_close(relation, AccessShareLock);

		/* Now we can build the CatCList entry. */
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
		nmembers = list_length(ctlist);
		cl = (CatCList *)
			palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));

		/* Extract key values */
		CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno,
						 arguments, cl->keys);
		MemoryContextSwitchTo(oldcxt);

		/*
		 * We are now past the last thing that could trigger an elog before we
		 * have finished building the CatCList and remembering it in the
		 * resource owner.  So it's OK to fall out of the PG_TRY, and indeed
		 * we'd better do so before we start marking the members as belonging
		 * to the list.
		 */
	}
	PG_CATCH();
	{
		/* Undo the temporary member refcounts taken above, then re-throw */
		foreach(ctlist_item, ctlist)
		{
			ct = (CatCTup *) lfirst(ctlist_item);
			Assert(ct->c_list == NULL);
			Assert(ct->refcount > 0);
			ct->refcount--;
			if (
#ifndef CATCACHE_FORCE_RELEASE
				ct->dead &&
#endif
				ct->refcount == 0 &&
				(ct->c_list == NULL || ct->c_list->refcount == 0))
				CatCacheRemoveCTup(cache, ct);
		}

		PG_RE_THROW();
	}
	PG_END_TRY();

	cl->cl_magic = CL_MAGIC;
	cl->my_cache = cache;
	cl->refcount = 0;			/* for the moment */
	cl->dead = false;
	cl->ordered = ordered;
	cl->nkeys = nkeys;
	cl->hash_value = lHashValue;
	cl->n_members = nmembers;

	i = 0;
	foreach(ctlist_item, ctlist)
	{
		cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
		Assert(ct->c_list == NULL);
		ct->c_list = cl;
		/* release the temporary refcount on the member */
		Assert(ct->refcount > 0);
		ct->refcount--;
		/* mark list dead if any members already dead */
		if (ct->dead)
			cl->dead = true;
	}
	Assert(i == nmembers);

	dlist_push_head(&cache->cc_lists, &cl->cache_elem);

	/* Finally, bump the list's refcount and return it */
	cl->refcount++;
	ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);

	CACHE_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
			   cache->cc_relname, nmembers);

	return cl;
}
1769 :
/*
 * ReleaseCatCacheList
 *
 *	Decrement the reference count of a catcache list
 *	(releasing the hold grabbed by a successful SearchCatCacheList).
 */
void
ReleaseCatCacheList(CatCList *list)
{
	/* Safety checks to ensure we were handed a cache entry */
	Assert(list->cl_magic == CL_MAGIC);
	Assert(list->refcount > 0);
	list->refcount--;
	ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);

	/*
	 * Normally only lists already marked dead are removed at refcount zero;
	 * with CATCACHE_FORCE_RELEASE the dead test is compiled out and every
	 * list is removed as soon as its last reference is released.
	 */
	if (
#ifndef CATCACHE_FORCE_RELEASE
		list->dead &&
#endif
		list->refcount == 0)
		CatCacheRemoveCList(list->my_cache, list);
}
1791 :
1792 :
/*
 * CatalogCacheCreateEntry
 *		Create a new CatCTup entry, copying the given HeapTuple and other
 *		supplied data into it.  The new entry initially has refcount 0.
 *
 * For a positive entry, ntp is the catalog tuple to copy and 'arguments'
 * is unused; for a negative entry (negative == true), ntp is NULL and the
 * key values are copied from 'arguments'.  The entry is pushed onto the
 * bucket identified by hashValue/hashIndex.
 */
static CatCTup *
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
						uint32 hashValue, Index hashIndex,
						bool negative)
{
	CatCTup    *ct;
	HeapTuple	dtp;
	MemoryContext oldcxt;

	/* negative entries have no tuple associated */
	if (ntp)
	{
		int			i;

		Assert(!negative);

		/*
		 * If there are any out-of-line toasted fields in the tuple, expand
		 * them in-line.  This saves cycles during later use of the catcache
		 * entry, and also protects us against the possibility of the toast
		 * tuples being freed before we attempt to fetch them, in case of
		 * something using a slightly stale catcache entry.
		 */
		if (HeapTupleHasExternal(ntp))
			dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
		else
			dtp = ntp;

		/* Allocate memory for CatCTup and the cached tuple in one go */
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

		/* extra MAXIMUM_ALIGNOF bytes allow aligning t_data within the blob */
		ct = (CatCTup *) palloc(sizeof(CatCTup) +
								MAXIMUM_ALIGNOF + dtp->t_len);
		ct->tuple.t_len = dtp->t_len;
		ct->tuple.t_self = dtp->t_self;
		ct->tuple.t_tableOid = dtp->t_tableOid;
		ct->tuple.t_data = (HeapTupleHeader)
			MAXALIGN(((char *) ct) + sizeof(CatCTup));
		/* copy tuple contents */
		memcpy((char *) ct->tuple.t_data,
			   (const char *) dtp->t_data,
			   dtp->t_len);
		MemoryContextSwitchTo(oldcxt);

		/* free the flattened copy if toast expansion made one */
		if (dtp != ntp)
			heap_freetuple(dtp);

		/* extract keys - they'll point into the tuple if not by-value */
		for (i = 0; i < cache->cc_nkeys; i++)
		{
			Datum		atp;
			bool		isnull;

			atp = heap_getattr(&ct->tuple,
							   cache->cc_keyno[i],
							   cache->cc_tupdesc,
							   &isnull);
			Assert(!isnull);
			ct->keys[i] = atp;
		}
	}
	else
	{
		Assert(negative);
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
		ct = (CatCTup *) palloc(sizeof(CatCTup));

		/*
		 * Store keys - they'll point into separately allocated memory if not
		 * by-value.
		 */
		CatCacheCopyKeys(cache->cc_tupdesc, cache->cc_nkeys, cache->cc_keyno,
						 arguments, ct->keys);
		MemoryContextSwitchTo(oldcxt);
	}

	/*
	 * Finish initializing the CatCTup header, and add it to the cache's
	 * linked list and counts.
	 */
	ct->ct_magic = CT_MAGIC;
	ct->my_cache = cache;
	ct->c_list = NULL;
	ct->refcount = 0;			/* for the moment */
	ct->dead = false;
	ct->negative = negative;
	ct->hash_value = hashValue;

	dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);

	cache->cc_ntup++;
	CacheHdr->ch_ntup++;

	/*
	 * If the hash table has become too full, enlarge the buckets array. Quite
	 * arbitrarily, we enlarge when fill factor > 2.
	 */
	if (cache->cc_ntup > cache->cc_nbuckets * 2)
		RehashCatCache(cache);

	return ct;
}
1900 :
1901 : /*
1902 : * Helper routine that frees keys stored in the keys array.
1903 : */
1904 : static void
1905 1576200 : CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
1906 : {
1907 : int i;
1908 :
1909 5242900 : for (i = 0; i < nkeys; i++)
1910 : {
1911 3666700 : int attnum = attnos[i];
1912 : Form_pg_attribute att;
1913 :
1914 : /* system attribute are not supported in caches */
1915 : Assert(attnum > 0);
1916 :
1917 3666700 : att = TupleDescAttr(tupdesc, attnum - 1);
1918 :
1919 3666700 : if (!att->attbyval)
1920 1310388 : pfree(DatumGetPointer(keys[i]));
1921 : }
1922 1576200 : }
1923 :
1924 : /*
1925 : * Helper routine that copies the keys in the srckeys array into the dstkeys
1926 : * one, guaranteeing that the datums are fully allocated in the current memory
1927 : * context.
1928 : */
1929 : static void
1930 3671160 : CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
1931 : Datum *srckeys, Datum *dstkeys)
1932 : {
1933 : int i;
1934 :
1935 : /*
1936 : * XXX: memory and lookup performance could possibly be improved by
1937 : * storing all keys in one allocation.
1938 : */
1939 :
1940 12142556 : for (i = 0; i < nkeys; i++)
1941 : {
1942 8471396 : int attnum = attnos[i];
1943 8471396 : Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
1944 8471396 : Datum src = srckeys[i];
1945 : NameData srcname;
1946 :
1947 : /*
1948 : * Must be careful in case the caller passed a C string where a NAME
1949 : * is wanted: convert the given argument to a correctly padded NAME.
1950 : * Otherwise the memcpy() done by datumCopy() could fall off the end
1951 : * of memory.
1952 : */
1953 8471396 : if (att->atttypid == NAMEOID)
1954 : {
1955 2057874 : namestrcpy(&srcname, DatumGetCString(src));
1956 2057874 : src = NameGetDatum(&srcname);
1957 : }
1958 :
1959 8471396 : dstkeys[i] = datumCopy(src,
1960 8471396 : att->attbyval,
1961 8471396 : att->attlen);
1962 : }
1963 3671160 : }
1964 :
1965 : /*
1966 : * PrepareToInvalidateCacheTuple()
1967 : *
1968 : * This is part of a rather subtle chain of events, so pay attention:
1969 : *
1970 : * When a tuple is inserted or deleted, it cannot be flushed from the
1971 : * catcaches immediately, for reasons explained at the top of cache/inval.c.
1972 : * Instead we have to add entry(s) for the tuple to a list of pending tuple
1973 : * invalidations that will be done at the end of the command or transaction.
1974 : *
1975 : * The lists of tuples that need to be flushed are kept by inval.c. This
1976 : * routine is a helper routine for inval.c. Given a tuple belonging to
1977 : * the specified relation, find all catcaches it could be in, compute the
1978 : * correct hash value for each such catcache, and call the specified
1979 : * function to record the cache id and hash value in inval.c's lists.
1980 : * SysCacheInvalidate will be called later, if appropriate,
1981 : * using the recorded information.
1982 : *
1983 : * For an insert or delete, tuple is the target tuple and newtuple is NULL.
1984 : * For an update, we are called just once, with tuple being the old tuple
1985 : * version and newtuple the new version. We should make two list entries
1986 : * if the tuple's hash value changed, but only one if it didn't.
1987 : *
1988 : * Note that it is irrelevant whether the given tuple is actually loaded
1989 : * into the catcache at the moment. Even if it's not there now, it might
1990 : * be by the end of the command, or there might be a matching negative entry
1991 : * to flush --- or other backends' caches might have such entries --- so
1992 : * we have to make list entries to flush it later.
1993 : *
1994 : * Also note that it's not an error if there are no catcaches for the
1995 : * specified relation. inval.c doesn't know exactly which rels have
1996 : * catcaches --- it will call this routine for any tuple that's in a
1997 : * system relation.
1998 : */
1999 : void
2000 5030962 : PrepareToInvalidateCacheTuple(Relation relation,
2001 : HeapTuple tuple,
2002 : HeapTuple newtuple,
2003 : void (*function) (int, uint32, Oid))
2004 : {
2005 : slist_iter iter;
2006 : Oid reloid;
2007 :
2008 : CACHE_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
2009 :
2010 : /*
2011 : * sanity checks
2012 : */
2013 : Assert(RelationIsValid(relation));
2014 : Assert(HeapTupleIsValid(tuple));
2015 : Assert(PointerIsValid(function));
2016 : Assert(CacheHdr != NULL);
2017 :
2018 5030962 : reloid = RelationGetRelid(relation);
2019 :
2020 : /* ----------------
2021 : * for each cache
2022 : * if the cache contains tuples from the specified relation
2023 : * compute the tuple's hash value(s) in this cache,
2024 : * and call the passed function to register the information.
2025 : * ----------------
2026 : */
2027 :
2028 422600808 : slist_foreach(iter, &CacheHdr->ch_caches)
2029 : {
2030 417569846 : CatCache *ccp = slist_container(CatCache, cc_next, iter.cur);
2031 : uint32 hashvalue;
2032 : Oid dbid;
2033 :
2034 417569846 : if (ccp->cc_reloid != reloid)
2035 408824654 : continue;
2036 :
2037 : /* Just in case cache hasn't finished initialization yet... */
2038 8745192 : if (ccp->cc_tupdesc == NULL)
2039 7264 : CatalogCacheInitializeCache(ccp);
2040 :
2041 8745192 : hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
2042 8745192 : dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
2043 :
2044 8745192 : (*function) (ccp->id, hashvalue, dbid);
2045 :
2046 8745192 : if (newtuple)
2047 : {
2048 : uint32 newhashvalue;
2049 :
2050 877808 : newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
2051 :
2052 877808 : if (newhashvalue != hashvalue)
2053 5296 : (*function) (ccp->id, newhashvalue, dbid);
2054 : }
2055 : }
2056 5030962 : }
2057 :
2058 :
2059 : /*
2060 : * Subroutines for warning about reference leaks. These are exported so
2061 : * that resowner.c can call them.
2062 : */
2063 : void
2064 0 : PrintCatCacheLeakWarning(HeapTuple tuple)
2065 : {
2066 0 : CatCTup *ct = (CatCTup *) (((char *) tuple) -
2067 : offsetof(CatCTup, tuple));
2068 :
2069 : /* Safety check to ensure we were handed a cache entry */
2070 : Assert(ct->ct_magic == CT_MAGIC);
2071 :
2072 0 : elog(WARNING, "cache reference leak: cache %s (%d), tuple %u/%u has count %d",
2073 : ct->my_cache->cc_relname, ct->my_cache->id,
2074 : ItemPointerGetBlockNumber(&(tuple->t_self)),
2075 : ItemPointerGetOffsetNumber(&(tuple->t_self)),
2076 : ct->refcount);
2077 0 : }
2078 :
2079 : void
2080 0 : PrintCatCacheListLeakWarning(CatCList *list)
2081 : {
2082 0 : elog(WARNING, "cache reference leak: cache %s (%d), list %p has count %d",
2083 : list->my_cache->cc_relname, list->my_cache->id,
2084 : list, list->refcount);
2085 0 : }
|