Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * catcache.c
4 : * System catalog cache for tuples matching a key.
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/utils/cache/catcache.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : #include "postgres.h"
16 :
17 : #include "access/genam.h"
18 : #include "access/heaptoast.h"
19 : #include "access/relscan.h"
20 : #include "access/table.h"
21 : #include "access/xact.h"
22 : #include "catalog/catalog.h"
23 : #include "catalog/pg_collation.h"
24 : #include "catalog/pg_type.h"
25 : #include "common/hashfn.h"
26 : #include "common/pg_prng.h"
27 : #include "miscadmin.h"
28 : #include "port/pg_bitutils.h"
29 : #ifdef CATCACHE_STATS
30 : #include "storage/ipc.h" /* for on_proc_exit */
31 : #endif
32 : #include "storage/lmgr.h"
33 : #include "utils/builtins.h"
34 : #include "utils/catcache.h"
35 : #include "utils/datum.h"
36 : #include "utils/fmgroids.h"
37 : #include "utils/injection_point.h"
38 : #include "utils/inval.h"
39 : #include "utils/memutils.h"
40 : #include "utils/rel.h"
41 : #include "utils/resowner.h"
42 : #include "utils/syscache.h"
43 :
44 : /*
45 : * If a catcache invalidation is processed while we are in the middle of
46 : * creating a catcache entry (or list), it might apply to the entry we're
47 : * creating, making it invalid before it's been inserted into the catcache. To
48 : * catch such cases, we have a stack of "create-in-progress" entries. Cache
49 : * invalidation marks any matching entries in the stack as dead, in addition
50 : * to the actual CatCTup and CatCList entries.
51 : */
52 : typedef struct CatCInProgress
53 : {
54 : CatCache *cache; /* cache that the entry belongs to */
55 : uint32 hash_value; /* hash of the entry; ignored for lists */
56 : bool list; /* is it a list entry? */
57 : bool dead; /* set when the entry is invalidated */
58 : struct CatCInProgress *next;
59 : } CatCInProgress;
60 :
61 : static CatCInProgress *catcache_in_progress_stack = NULL;
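/*
 * Editor's sketch (not part of the measured source): the push/pop protocol
 * for this stack, simplified from its use when building an entry whose
 * construction can accept invalidations (e.g. during detoasting).
 *
 *     CatCInProgress in_progress_ent;
 *
 *     in_progress_ent.cache = cache;
 *     in_progress_ent.hash_value = hashValue;
 *     in_progress_ent.list = false;
 *     in_progress_ent.dead = false;
 *     in_progress_ent.next = catcache_in_progress_stack;
 *     catcache_in_progress_stack = &in_progress_ent;
 *
 *     ... build the CatCTup; a concurrent invalidation may set "dead" ...
 *
 *     catcache_in_progress_stack = in_progress_ent.next;
 *     if (in_progress_ent.dead)
 *         ... discard the just-built entry and retry the lookup ...
 */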
62 :
63 : /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
64 :
65 : /*
66 : * Given a hash value and the size of the hash table, find the bucket
67 : * in which the hash value belongs. Since the hash table must contain
68 : * a power-of-2 number of elements, this is a simple bitmask.
69 : */
70 : #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
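/*
 * Worked example (editor's note): with a power-of-two table size the mask
 * keeps only the low-order bits of the hash, so for a 256-bucket table
 *
 *     Assert(HASH_INDEX(0x12345678, 256) == (Index) 0x78);  (256 - 1 == 0xFF)
 */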
71 :
72 :
73 : /*
74 : * variables, macros and other stuff
75 : */
76 :
77 : #ifdef CACHEDEBUG
78 : #define CACHE_elog(...) elog(__VA_ARGS__)
79 : #else
80 : #define CACHE_elog(...)
81 : #endif
82 :
83 : /* Cache management header --- pointer is NULL until created */
84 : static CatCacheHeader *CacheHdr = NULL;
85 :
86 : static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
87 : int nkeys,
88 : Datum v1, Datum v2,
89 : Datum v3, Datum v4);
90 :
91 : static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache,
92 : int nkeys,
93 : uint32 hashValue,
94 : Index hashIndex,
95 : Datum v1, Datum v2,
96 : Datum v3, Datum v4);
97 :
98 : static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
99 : Datum v1, Datum v2, Datum v3, Datum v4);
100 : static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys,
101 : HeapTuple tuple);
102 : static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
103 : const Datum *cachekeys,
104 : const Datum *searchkeys);
105 :
106 : #ifdef CATCACHE_STATS
107 : static void CatCachePrintStats(int code, Datum arg);
108 : #endif
109 : static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
110 : static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
111 : static void RehashCatCache(CatCache *cp);
112 : static void RehashCatCacheLists(CatCache *cp);
113 : static void CatalogCacheInitializeCache(CatCache *cache);
114 : static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
115 : Datum *arguments,
116 : uint32 hashValue, Index hashIndex);
117 :
118 : static void ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner);
119 : static void ReleaseCatCacheListWithOwner(CatCList *list, ResourceOwner resowner);
120 : static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, const int *attnos,
121 : const Datum *keys);
122 : static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, const int *attnos,
123 : const Datum *srckeys, Datum *dstkeys);
124 :
125 :
126 : /*
127 : * internal support functions
128 : */
129 :
130 : /* ResourceOwner callbacks to hold catcache references */
131 :
132 : static void ResOwnerReleaseCatCache(Datum res);
133 : static char *ResOwnerPrintCatCache(Datum res);
134 : static void ResOwnerReleaseCatCacheList(Datum res);
135 : static char *ResOwnerPrintCatCacheList(Datum res);
136 :
137 : static const ResourceOwnerDesc catcache_resowner_desc =
138 : {
139 : /* catcache references */
140 : .name = "catcache reference",
141 : .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
142 : .release_priority = RELEASE_PRIO_CATCACHE_REFS,
143 : .ReleaseResource = ResOwnerReleaseCatCache,
144 : .DebugPrint = ResOwnerPrintCatCache
145 : };
146 :
147 : static const ResourceOwnerDesc catlistref_resowner_desc =
148 : {
149 : /* catcache-list pins */
150 : .name = "catcache list reference",
151 : .release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
152 : .release_priority = RELEASE_PRIO_CATCACHE_LIST_REFS,
153 : .ReleaseResource = ResOwnerReleaseCatCacheList,
154 : .DebugPrint = ResOwnerPrintCatCacheList
155 : };
156 :
157 : /* Convenience wrappers over ResourceOwnerRemember/Forget */
158 : static inline void
159 89794350 : ResourceOwnerRememberCatCacheRef(ResourceOwner owner, HeapTuple tuple)
160 : {
161 89794350 : ResourceOwnerRemember(owner, PointerGetDatum(tuple), &catcache_resowner_desc);
162 89794350 : }
163 : static inline void
164 89783456 : ResourceOwnerForgetCatCacheRef(ResourceOwner owner, HeapTuple tuple)
165 : {
166 89783456 : ResourceOwnerForget(owner, PointerGetDatum(tuple), &catcache_resowner_desc);
167 89783456 : }
168 : static inline void
169 3951280 : ResourceOwnerRememberCatCacheListRef(ResourceOwner owner, CatCList *list)
170 : {
171 3951280 : ResourceOwnerRemember(owner, PointerGetDatum(list), &catlistref_resowner_desc);
172 3951280 : }
173 : static inline void
174 3951244 : ResourceOwnerForgetCatCacheListRef(ResourceOwner owner, CatCList *list)
175 : {
176 3951244 : ResourceOwnerForget(owner, PointerGetDatum(list), &catlistref_resowner_desc);
177 3951244 : }
178 :
179 :
180 : /*
181 : * Hash and equality functions for system types that are used as cache key
182 : * fields. In some cases, we just call the regular SQL-callable functions for
183 : * the appropriate data type, but that tends to be a little slow, and the
184 : * speed of these functions is performance-critical. Therefore, for data
185 : * types that frequently occur as catcache keys, we hard-code the logic here.
186 : * Avoiding the overhead of DirectFunctionCallN(...) is a substantial win, and
187 : * in certain cases (like int4) we can adopt a faster hash algorithm as well.
188 : */
189 :
190 : static bool
191 5965518 : chareqfast(Datum a, Datum b)
192 : {
193 5965518 : return DatumGetChar(a) == DatumGetChar(b);
194 : }
195 :
196 : static uint32
197 6782030 : charhashfast(Datum datum)
198 : {
199 6782030 : return murmurhash32((int32) DatumGetChar(datum));
200 : }
201 :
202 : static bool
203 3945762 : nameeqfast(Datum a, Datum b)
204 : {
205 3945762 : char *ca = NameStr(*DatumGetName(a));
206 3945762 : char *cb = NameStr(*DatumGetName(b));
207 :
208 3945762 : return strncmp(ca, cb, NAMEDATALEN) == 0;
209 : }
210 :
211 : static uint32
212 9104544 : namehashfast(Datum datum)
213 : {
214 9104544 : char *key = NameStr(*DatumGetName(datum));
215 :
216 9104544 : return hash_bytes((unsigned char *) key, strlen(key));
217 : }
218 :
219 : static bool
220 9521376 : int2eqfast(Datum a, Datum b)
221 : {
222 9521376 : return DatumGetInt16(a) == DatumGetInt16(b);
223 : }
224 :
225 : static uint32
226 13184918 : int2hashfast(Datum datum)
227 : {
228 13184918 : return murmurhash32((int32) DatumGetInt16(datum));
229 : }
230 :
231 : static bool
232 104584514 : int4eqfast(Datum a, Datum b)
233 : {
234 104584514 : return DatumGetInt32(a) == DatumGetInt32(b);
235 : }
236 :
237 : static uint32
238 122766840 : int4hashfast(Datum datum)
239 : {
240 122766840 : return murmurhash32((int32) DatumGetInt32(datum));
241 : }
242 :
243 : static bool
244 166 : texteqfast(Datum a, Datum b)
245 : {
246 : /*
247 : * The use of DEFAULT_COLLATION_OID is fairly arbitrary here. We just
248 : * want to take the fast "deterministic" path in texteq().
249 : */
250 166 : return DatumGetBool(DirectFunctionCall2Coll(texteq, DEFAULT_COLLATION_OID, a, b));
251 : }
252 :
253 : static uint32
254 3868 : texthashfast(Datum datum)
255 : {
256 : /* same reasoning here as in texteqfast() */
257 3868 : return DatumGetInt32(DirectFunctionCall1Coll(hashtext, DEFAULT_COLLATION_OID, datum));
258 : }
259 :
260 : static bool
261 3112 : oidvectoreqfast(Datum a, Datum b)
262 : {
263 3112 : return DatumGetBool(DirectFunctionCall2(oidvectoreq, a, b));
264 : }
265 :
266 : static uint32
267 409874 : oidvectorhashfast(Datum datum)
268 : {
269 409874 : return DatumGetInt32(DirectFunctionCall1(hashoidvector, datum));
270 : }
271 :
272 : /* Lookup support functions for a type. */
273 : static void
274 1274006 : GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
275 : {
276 1274006 : switch (keytype)
277 : {
278 16600 : case BOOLOID:
279 16600 : *hashfunc = charhashfast;
280 16600 : *fasteqfunc = chareqfast;
281 16600 : *eqfunc = F_BOOLEQ;
282 16600 : break;
283 21626 : case CHAROID:
284 21626 : *hashfunc = charhashfast;
285 21626 : *fasteqfunc = chareqfast;
286 21626 : *eqfunc = F_CHAREQ;
287 21626 : break;
288 237898 : case NAMEOID:
289 237898 : *hashfunc = namehashfast;
290 237898 : *fasteqfunc = nameeqfast;
291 237898 : *eqfunc = F_NAMEEQ;
292 237898 : break;
293 72910 : case INT2OID:
294 72910 : *hashfunc = int2hashfast;
295 72910 : *fasteqfunc = int2eqfast;
296 72910 : *eqfunc = F_INT2EQ;
297 72910 : break;
298 19986 : case INT4OID:
299 19986 : *hashfunc = int4hashfast;
300 19986 : *fasteqfunc = int4eqfast;
301 19986 : *eqfunc = F_INT4EQ;
302 19986 : break;
303 8892 : case TEXTOID:
304 8892 : *hashfunc = texthashfast;
305 8892 : *fasteqfunc = texteqfast;
306 8892 : *eqfunc = F_TEXTEQ;
307 8892 : break;
308 878960 : case OIDOID:
309 : case REGPROCOID:
310 : case REGPROCEDUREOID:
311 : case REGOPEROID:
312 : case REGOPERATOROID:
313 : case REGCLASSOID:
314 : case REGTYPEOID:
315 : case REGCOLLATIONOID:
316 : case REGCONFIGOID:
317 : case REGDICTIONARYOID:
318 : case REGROLEOID:
319 : case REGNAMESPACEOID:
320 : case REGDATABASEOID:
321 878960 : *hashfunc = int4hashfast;
322 878960 : *fasteqfunc = int4eqfast;
323 878960 : *eqfunc = F_OIDEQ;
324 878960 : break;
325 17134 : case OIDVECTOROID:
326 17134 : *hashfunc = oidvectorhashfast;
327 17134 : *fasteqfunc = oidvectoreqfast;
328 17134 : *eqfunc = F_OIDVECTOREQ;
329 17134 : break;
330 0 : default:
331 0 : elog(FATAL, "type %u not supported as catcache key", keytype);
332 : *hashfunc = NULL; /* keep compiler quiet */
333 :
334 : *eqfunc = InvalidOid;
335 : break;
336 : }
337 1274006 : }
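/*
 * Usage sketch (editor's note): CatalogCacheInitializeCache() below calls
 * this once per key column. For an OID key the outputs would be:
 *
 *     CCHashFN hashfunc;
 *     RegProcedure eqfunc;
 *     CCFastEqualFN fasteqfunc;
 *
 *     GetCCHashEqFuncs(OIDOID, &hashfunc, &eqfunc, &fasteqfunc);
 *     Assert(hashfunc == int4hashfast && fasteqfunc == int4eqfast);
 *     Assert(eqfunc == F_OIDEQ);
 */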
338 :
339 : /*
340 : * CatalogCacheComputeHashValue
341 : *
342 : * Compute the hash value associated with a given set of lookup keys
343 : */
344 : static uint32
345 108442866 : CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
346 : Datum v1, Datum v2, Datum v3, Datum v4)
347 : {
348 108442866 : uint32 hashValue = 0;
349 : uint32 oneHash;
350 108442866 : CCHashFN *cc_hashfunc = cache->cc_hashfunc;
351 :
352 : CACHE_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
353 : cache->cc_relname, nkeys, cache);
354 :
355 108442866 : switch (nkeys)
356 : {
357 5049468 : case 4:
358 5049468 : oneHash = (cc_hashfunc[3]) (v4);
359 5049468 : hashValue ^= pg_rotate_left32(oneHash, 24);
360 : /* FALLTHROUGH */
361 12692078 : case 3:
362 12692078 : oneHash = (cc_hashfunc[2]) (v3);
363 12692078 : hashValue ^= pg_rotate_left32(oneHash, 16);
364 : /* FALLTHROUGH */
365 26068564 : case 2:
366 26068564 : oneHash = (cc_hashfunc[1]) (v2);
367 26068564 : hashValue ^= pg_rotate_left32(oneHash, 8);
368 : /* FALLTHROUGH */
369 108442866 : case 1:
370 108442866 : oneHash = (cc_hashfunc[0]) (v1);
371 108442866 : hashValue ^= oneHash;
372 108442866 : break;
373 0 : default:
374 0 : elog(FATAL, "wrong number of hash keys: %d", nkeys);
375 : break;
376 : }
377 :
378 108442866 : return hashValue;
379 : }
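/*
 * Worked example (editor's note): for nkeys == 2 the fall-through switch
 * above reduces to
 *
 *     hashValue = pg_rotate_left32(cc_hashfunc[1](v2), 8) ^ cc_hashfunc[0](v1);
 *
 * The per-position rotations keep equal Datums in different key columns
 * from cancelling each other out under XOR.
 */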
380 :
381 : /*
382 : * CatalogCacheComputeTupleHashValue
383 : *
384 : * Compute the hash value associated with a given tuple to be cached
385 : */
386 : static uint32
387 7446522 : CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple)
388 : {
389 7446522 : Datum v1 = 0,
390 7446522 : v2 = 0,
391 7446522 : v3 = 0,
392 7446522 : v4 = 0;
393 7446522 : bool isNull = false;
394 7446522 : int *cc_keyno = cache->cc_keyno;
395 7446522 : TupleDesc cc_tupdesc = cache->cc_tupdesc;
396 :
397 : /* Now extract key fields from tuple, insert into scankey */
398 7446522 : switch (nkeys)
399 : {
400 471862 : case 4:
401 471862 : v4 = fastgetattr(tuple,
402 471862 : cc_keyno[3],
403 : cc_tupdesc,
404 : &isNull);
405 : Assert(!isNull);
406 : /* FALLTHROUGH */
407 1340550 : case 3:
408 1340550 : v3 = fastgetattr(tuple,
409 1340550 : cc_keyno[2],
410 : cc_tupdesc,
411 : &isNull);
412 : Assert(!isNull);
413 : /* FALLTHROUGH */
414 5391690 : case 2:
415 5391690 : v2 = fastgetattr(tuple,
416 5391690 : cc_keyno[1],
417 : cc_tupdesc,
418 : &isNull);
419 : Assert(!isNull);
420 : /* FALLTHROUGH */
421 7446522 : case 1:
422 7446522 : v1 = fastgetattr(tuple,
423 : cc_keyno[0],
424 : cc_tupdesc,
425 : &isNull);
426 : Assert(!isNull);
427 7446522 : break;
428 0 : default:
429 0 : elog(FATAL, "wrong number of hash keys: %d", nkeys);
430 : break;
431 : }
432 :
433 7446522 : return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
434 : }
435 :
436 : /*
437 : * CatalogCacheCompareTuple
438 : *
439 : * Compare a tuple to the passed arguments.
440 : */
441 : static inline bool
442 92853242 : CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
443 : const Datum *cachekeys,
444 : const Datum *searchkeys)
445 : {
446 92853242 : const CCFastEqualFN *cc_fastequal = cache->cc_fastequal;
447 : int i;
448 :
449 216873870 : for (i = 0; i < nkeys; i++)
450 : {
451 124020628 : if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i]))
452 0 : return false;
453 : }
454 92853242 : return true;
455 : }
456 :
457 :
458 : #ifdef CATCACHE_STATS
459 :
460 : static void
461 : CatCachePrintStats(int code, Datum arg)
462 : {
463 : slist_iter iter;
464 : uint64 cc_searches = 0;
465 : uint64 cc_hits = 0;
466 : uint64 cc_neg_hits = 0;
467 : uint64 cc_newloads = 0;
468 : uint64 cc_invals = 0;
469 : uint64 cc_nlists = 0;
470 : uint64 cc_lsearches = 0;
471 : uint64 cc_lhits = 0;
472 :
473 : slist_foreach(iter, &CacheHdr->ch_caches)
474 : {
475 : CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
476 :
477 : if (cache->cc_ntup == 0 && cache->cc_searches == 0)
478 : continue; /* don't print unused caches */
479 : elog(DEBUG2, "catcache %s/%u: %d tup, %" PRIu64 " srch, %" PRIu64 "+%"
480 : PRIu64 "=%" PRIu64 " hits, %" PRIu64 "+%" PRIu64 "=%"
481 : PRIu64 " loads, %" PRIu64 " invals, %d lists, %" PRIu64
482 : " lsrch, %" PRIu64 " lhits",
483 : cache->cc_relname,
484 : cache->cc_indexoid,
485 : cache->cc_ntup,
486 : cache->cc_searches,
487 : cache->cc_hits,
488 : cache->cc_neg_hits,
489 : cache->cc_hits + cache->cc_neg_hits,
490 : cache->cc_newloads,
491 : cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
492 : cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
493 : cache->cc_invals,
494 : cache->cc_nlist,
495 : cache->cc_lsearches,
496 : cache->cc_lhits);
497 : cc_searches += cache->cc_searches;
498 : cc_hits += cache->cc_hits;
499 : cc_neg_hits += cache->cc_neg_hits;
500 : cc_newloads += cache->cc_newloads;
501 : cc_invals += cache->cc_invals;
502 : cc_nlists += cache->cc_nlist;
503 : cc_lsearches += cache->cc_lsearches;
504 : cc_lhits += cache->cc_lhits;
505 : }
506 : elog(DEBUG2, "catcache totals: %d tup, %" PRIu64 " srch, %" PRIu64 "+%"
507 : PRIu64 "=%" PRIu64 " hits, %" PRIu64 "+%" PRIu64 "=%" PRIu64
508 : " loads, %" PRIu64 " invals, %" PRIu64 " lists, %" PRIu64
509 : " lsrch, %" PRIu64 " lhits",
510 : CacheHdr->ch_ntup,
511 : cc_searches,
512 : cc_hits,
513 : cc_neg_hits,
514 : cc_hits + cc_neg_hits,
515 : cc_newloads,
516 : cc_searches - cc_hits - cc_neg_hits - cc_newloads,
517 : cc_searches - cc_hits - cc_neg_hits,
518 : cc_invals,
519 : cc_nlists,
520 : cc_lsearches,
521 : cc_lhits);
522 : }
523 : #endif /* CATCACHE_STATS */
524 :
525 :
526 : /*
527 : * CatCacheRemoveCTup
528 : *
529 : * Unlink and delete the given cache entry
530 : *
531 : * NB: if it is a member of a CatCList, the CatCList is deleted too.
532 : * Both the cache entry and the list had better have zero refcount.
533 : */
534 : static void
535 1604212 : CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
536 : {
537 : Assert(ct->refcount == 0);
538 : Assert(ct->my_cache == cache);
539 :
540 1604212 : if (ct->c_list)
541 : {
542 : /*
543 : * The cleanest way to handle this is to call CatCacheRemoveCList,
544 : * which will recurse back to me, and the recursive call will do the
545 : * work. Set the "dead" flag to make sure it does recurse.
546 : */
547 0 : ct->dead = true;
548 0 : CatCacheRemoveCList(cache, ct->c_list);
549 0 : return; /* nothing left to do */
550 : }
551 :
552 : /* delink from linked list */
553 1604212 : dlist_delete(&ct->cache_elem);
554 :
555 : /*
556 : * Free the keys if we're dealing with a negative entry; normal entries
557 : * just point into the tuple, which is allocated together with the CatCTup.
558 : */
559 1604212 : if (ct->negative)
560 461138 : CatCacheFreeKeys(cache->cc_tupdesc, cache->cc_nkeys,
561 461138 : cache->cc_keyno, ct->keys);
562 :
563 1604212 : pfree(ct);
564 :
565 1604212 : --cache->cc_ntup;
566 1604212 : --CacheHdr->ch_ntup;
567 : }
568 :
569 : /*
570 : * CatCacheRemoveCList
571 : *
572 : * Unlink and delete the given cache list entry
573 : *
574 : * NB: any dead member entries that become unreferenced are deleted too.
575 : */
576 : static void
577 134688 : CatCacheRemoveCList(CatCache *cache, CatCList *cl)
578 : {
579 : int i;
580 :
581 : Assert(cl->refcount == 0);
582 : Assert(cl->my_cache == cache);
583 :
584 : /* delink from member tuples */
585 446516 : for (i = cl->n_members; --i >= 0;)
586 : {
587 311828 : CatCTup *ct = cl->members[i];
588 :
589 : Assert(ct->c_list == cl);
590 311828 : ct->c_list = NULL;
591 : /* if the member is dead and now has no references, remove it */
592 311828 : if (
593 : #ifndef CATCACHE_FORCE_RELEASE
594 311828 : ct->dead &&
595 : #endif
596 144 : ct->refcount == 0)
597 144 : CatCacheRemoveCTup(cache, ct);
598 : }
599 :
600 : /* delink from linked list */
601 134688 : dlist_delete(&cl->cache_elem);
602 :
603 : /* free associated column data */
604 134688 : CatCacheFreeKeys(cache->cc_tupdesc, cl->nkeys,
605 134688 : cache->cc_keyno, cl->keys);
606 :
607 134688 : pfree(cl);
608 :
609 134688 : --cache->cc_nlist;
610 134688 : }
611 :
612 :
613 : /*
614 : * CatCacheInvalidate
615 : *
616 : * Invalidate entries in the specified cache, given a hash value.
617 : *
618 : * We delete cache entries that match the hash value, whether positive
619 : * or negative. We don't care whether the invalidation is the result
620 : * of a tuple insertion or a deletion.
621 : *
622 : * We used to try to match positive cache entries by TID, but that is
623 : * unsafe after a VACUUM FULL on a system catalog: an inval event could
624 : * be queued before VACUUM FULL, and then processed afterwards, when the
625 : * target tuple that has to be invalidated has a different TID than it
626 : * did when the event was created. So now we just compare hash values and
627 : * accept the small risk of unnecessary invalidations due to false matches.
628 : *
629 : * This routine is only quasi-public: it should only be used by inval.c.
630 : */
631 : void
632 22262544 : CatCacheInvalidate(CatCache *cache, uint32 hashValue)
633 : {
634 : Index hashIndex;
635 : dlist_mutable_iter iter;
636 :
637 : CACHE_elog(DEBUG2, "CatCacheInvalidate: called");
638 :
639 : /*
640 : * We don't bother to check whether the cache has finished initialization
641 : * yet; if not, there will be no entries in it so no problem.
642 : */
643 :
644 : /*
645 : * Invalidate *all* CatCLists in this cache; it's too hard to tell which
646 : * searches might still be correct, so just zap 'em all.
647 : */
648 26901552 : for (int i = 0; i < cache->cc_nlbuckets; i++)
649 : {
650 4639008 : dlist_head *bucket = &cache->cc_lbucket[i];
651 :
652 4768802 : dlist_foreach_modify(iter, bucket)
653 : {
654 129794 : CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
655 :
656 129794 : if (cl->refcount > 0)
657 144 : cl->dead = true;
658 : else
659 129650 : CatCacheRemoveCList(cache, cl);
660 : }
661 : }
662 :
663 : /*
664 : * inspect the proper hash bucket for tuple matches
665 : */
666 22262544 : hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
667 30540958 : dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
668 : {
669 8278414 : CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
670 :
671 8278414 : if (hashValue == ct->hash_value)
672 : {
673 1407620 : if (ct->refcount > 0 ||
674 1406112 : (ct->c_list && ct->c_list->refcount > 0))
675 : {
676 1652 : ct->dead = true;
677 : /* list, if any, was marked dead above */
678 1652 : Assert(ct->c_list == NULL || ct->c_list->dead);
679 : }
680 : else
681 1405968 : CatCacheRemoveCTup(cache, ct);
682 : CACHE_elog(DEBUG2, "CatCacheInvalidate: invalidated");
683 : #ifdef CATCACHE_STATS
684 : cache->cc_invals++;
685 : #endif
686 : /* could be multiple matches, so keep looking! */
687 : }
688 : }
689 :
690 : /* Also invalidate any entries that are being built */
691 22425180 : for (CatCInProgress *e = catcache_in_progress_stack; e != NULL; e = e->next)
692 : {
693 162636 : if (e->cache == cache)
694 : {
695 528 : if (e->list || e->hash_value == hashValue)
696 524 : e->dead = true;
697 : }
698 : }
699 22262544 : }
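/*
 * Caller sketch (editor's note; the real flow lives in inval.c and in
 * PrepareToInvalidateCacheTuple()): an invalidation for a changed catalog
 * tuple is routed here roughly as
 *
 *     uint32 hashValue = CatalogCacheComputeTupleHashValue(cache,
 *                                                          cache->cc_nkeys,
 *                                                          tuple);
 *     ... queue the message; later, on receipt ...
 *     CatCacheInvalidate(cache, hashValue);
 */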
700 :
701 : /* ----------------------------------------------------------------
702 : * public functions
703 : * ----------------------------------------------------------------
704 : */
705 :
706 :
707 : /*
708 : * Standard routine for creating cache context if it doesn't exist yet
709 : *
710 : * There are a lot of places (probably far more than necessary) that check
711 : * whether CacheMemoryContext exists yet and want to create it if not.
712 : * We centralize knowledge of exactly how to create it here.
713 : */
714 : void
715 36388 : CreateCacheMemoryContext(void)
716 : {
717 : /*
718 : * Purely for paranoia, check that context doesn't exist; caller probably
719 : * did so already.
720 : */
721 36388 : if (!CacheMemoryContext)
722 36388 : CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
723 : "CacheMemoryContext",
724 : ALLOCSET_DEFAULT_SIZES);
725 36388 : }
726 :
727 :
728 : /*
729 : * ResetCatalogCache
730 : *
731 : * Reset one catalog cache to empty.
732 : *
733 : * This is not very efficient if the target cache is nearly empty.
734 : * However, it shouldn't need to be efficient; we don't invoke it often.
735 : *
736 : * If 'debug_discard' is true, we are being called as part of
737 : * debug_discard_caches. In that case, the cache is not reset for
738 : * correctness, but just to get more testing of cache invalidation. We skip
739 : * resetting in-progress build entries in that case, or we'd never make any
740 : * progress.
741 : */
742 : static void
743 362812 : ResetCatalogCache(CatCache *cache, bool debug_discard)
744 : {
745 : dlist_mutable_iter iter;
746 : int i;
747 :
748 : /* Remove each list in this cache, or at least mark it dead */
749 407964 : for (i = 0; i < cache->cc_nlbuckets; i++)
750 : {
751 45152 : dlist_head *bucket = &cache->cc_lbucket[i];
752 :
753 50184 : dlist_foreach_modify(iter, bucket)
754 : {
755 5032 : CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
756 :
757 5032 : if (cl->refcount > 0)
758 0 : cl->dead = true;
759 : else
760 5032 : CatCacheRemoveCList(cache, cl);
761 : }
762 : }
763 :
764 : /* Remove each tuple in this cache, or at least mark it dead */
765 10859932 : for (i = 0; i < cache->cc_nbuckets; i++)
766 : {
767 10497120 : dlist_head *bucket = &cache->cc_bucket[i];
768 :
769 10693726 : dlist_foreach_modify(iter, bucket)
770 : {
771 196606 : CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
772 :
773 196606 : if (ct->refcount > 0 ||
774 196596 : (ct->c_list && ct->c_list->refcount > 0))
775 : {
776 10 : ct->dead = true;
777 : /* list, if any, was marked dead above */
778 10 : Assert(ct->c_list == NULL || ct->c_list->dead);
779 : }
780 : else
781 196596 : CatCacheRemoveCTup(cache, ct);
782 : #ifdef CATCACHE_STATS
783 : cache->cc_invals++;
784 : #endif
785 : }
786 : }
787 :
788 : /* Also invalidate any entries that are being built */
789 362812 : if (!debug_discard)
790 : {
791 363322 : for (CatCInProgress *e = catcache_in_progress_stack; e != NULL; e = e->next)
792 : {
793 510 : if (e->cache == cache)
794 6 : e->dead = true;
795 : }
796 : }
797 362812 : }
798 :
799 : /*
800 : * ResetCatalogCaches
801 : *
802 : * Reset all caches when a shared cache inval event forces it
803 : */
804 : void
805 0 : ResetCatalogCaches(void)
806 : {
807 0 : ResetCatalogCachesExt(false);
808 0 : }
809 :
810 : void
811 4256 : ResetCatalogCachesExt(bool debug_discard)
812 : {
813 : slist_iter iter;
814 :
815 : CACHE_elog(DEBUG2, "ResetCatalogCaches called");
816 :
817 366016 : slist_foreach(iter, &CacheHdr->ch_caches)
818 : {
819 361760 : CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
820 :
821 361760 : ResetCatalogCache(cache, debug_discard);
822 : }
823 :
824 : CACHE_elog(DEBUG2, "end of ResetCatalogCaches call");
825 4256 : }
826 :
827 : /*
828 : * CatalogCacheFlushCatalog
829 : *
830 : * Flush all catcache entries that came from the specified system catalog.
831 : * This is needed after VACUUM FULL/CLUSTER on the catalog, since the
832 : * tuples very likely now have different TIDs than before. (At one point
833 : * we also tried to force re-execution of CatalogCacheInitializeCache for
834 : * the cache(s) on that catalog. This is a bad idea since it leads to all
835 : * kinds of trouble if a cache flush occurs while loading cache entries.
836 : * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
837 : * rather than relying on the relcache to keep a tupdesc for us. Of course
838 : * this assumes the tupdesc of a cacheable system table will not change...)
839 : */
840 : void
841 774 : CatalogCacheFlushCatalog(Oid catId)
842 : {
843 : slist_iter iter;
844 :
845 : CACHE_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
846 :
847 66564 : slist_foreach(iter, &CacheHdr->ch_caches)
848 : {
849 65790 : CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
850 :
851 : /* Does this cache store tuples of the target catalog? */
852 65790 : if (cache->cc_reloid == catId)
853 : {
854 : /* Yes, so flush all its contents */
855 1052 : ResetCatalogCache(cache, false);
856 :
857 : /* Tell inval.c to call syscache callbacks for this cache */
858 1052 : CallSyscacheCallbacks(cache->id, 0);
859 : }
860 : }
861 :
862 : CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
863 774 : }
864 :
865 : /*
866 : * InitCatCache
867 : *
868 : * This allocates and initializes a cache for a system catalog relation.
869 : * Actually, the cache is only partially initialized to avoid opening the
870 : * relation. The relation will be opened and the rest of the cache
871 : * structure initialized on the first access.
872 : */
873 : #ifdef CACHEDEBUG
874 : #define InitCatCache_DEBUG2 \
875 : do { \
876 : elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
877 : cp->cc_reloid, cp->cc_indexoid, cp->id, \
878 : cp->cc_nkeys, cp->cc_nbuckets); \
879 : } while(0)
880 : #else
881 : #define InitCatCache_DEBUG2
882 : #endif
883 :
884 : CatCache *
885 3092980 : InitCatCache(int id,
886 : Oid reloid,
887 : Oid indexoid,
888 : int nkeys,
889 : const int *key,
890 : int nbuckets)
891 : {
892 : CatCache *cp;
893 : MemoryContext oldcxt;
894 : int i;
895 :
896 : /*
897 : * nbuckets is the initial number of hash buckets to use in this catcache.
898 : * It will be enlarged later if it becomes too full.
899 : *
900 : * nbuckets must be a power of two. We check this via Assert rather than
901 : * a full runtime check because the values will be coming from constant
902 : * tables.
903 : *
904 : * If you're confused by the power-of-two check, see comments in
905 : * bitmapset.c for an explanation.
906 : */
907 : Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
908 :
909 : /*
910 : * first switch to the cache context so our allocations do not vanish at
911 : * the end of a transaction
912 : */
913 3092980 : if (!CacheMemoryContext)
914 0 : CreateCacheMemoryContext();
915 :
916 3092980 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
917 :
918 : /*
919 : * if first time through, initialize the cache group header
920 : */
921 3092980 : if (CacheHdr == NULL)
922 : {
923 36388 : CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
924 36388 : slist_init(&CacheHdr->ch_caches);
925 36388 : CacheHdr->ch_ntup = 0;
926 : #ifdef CATCACHE_STATS
927 : /* set up to dump stats at backend exit */
928 : on_proc_exit(CatCachePrintStats, 0);
929 : #endif
930 : }
931 :
932 : /*
933 : * Allocate a new cache structure, aligning to a cacheline boundary
934 : *
935 : * Note: we rely on zeroing to initialize all the dlist headers correctly
936 : */
937 3092980 : cp = (CatCache *) palloc_aligned(sizeof(CatCache), PG_CACHE_LINE_SIZE,
938 : MCXT_ALLOC_ZERO);
939 3092980 : cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));
940 :
941 : /*
942 : * Many catcaches never receive any list searches. Therefore, we don't
943 : * allocate the cc_lbucket array till we get a list search.
944 : */
945 3092980 : cp->cc_lbucket = NULL;
946 :
947 : /*
948 : * initialize the cache's relation information for the relation
949 : * corresponding to this cache, and initialize some of the new cache's
950 : * other internal fields. But don't open the relation yet.
951 : */
952 3092980 : cp->id = id;
953 3092980 : cp->cc_relname = "(not known yet)";
954 3092980 : cp->cc_reloid = reloid;
955 3092980 : cp->cc_indexoid = indexoid;
956 3092980 : cp->cc_relisshared = false; /* temporary */
957 3092980 : cp->cc_tupdesc = (TupleDesc) NULL;
958 3092980 : cp->cc_ntup = 0;
959 3092980 : cp->cc_nlist = 0;
960 3092980 : cp->cc_nbuckets = nbuckets;
961 3092980 : cp->cc_nlbuckets = 0;
962 3092980 : cp->cc_nkeys = nkeys;
963 8078136 : for (i = 0; i < nkeys; ++i)
964 : {
965 : Assert(AttributeNumberIsValid(key[i]));
966 4985156 : cp->cc_keyno[i] = key[i];
967 : }
968 :
969 : /*
970 : * new cache is initialized as far as we can go for now. print some
971 : * debugging information, if appropriate.
972 : */
973 : InitCatCache_DEBUG2;
974 :
975 : /*
976 : * add completed cache to top of group header's list
977 : */
978 3092980 : slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);
979 :
980 : /*
981 : * back to the old context before we return...
982 : */
983 3092980 : MemoryContextSwitchTo(oldcxt);
984 :
985 3092980 : return cp;
986 : }
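/*
 * Usage sketch (editor's note; real call sites are driven by the tables in
 * syscache.c): a one-key cache over pg_class OIDs might be created as
 *
 *     static const int key[1] = {Anum_pg_class_oid};
 *     CatCache *cc = InitCatCache(RELOID, RelationRelationId,
 *                                 ClassOidIndexId, 1, key, 128);
 *
 * nbuckets must be a power of two, per the Assert above.
 */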
987 :
988 : /*
989 : * Enlarge a catcache, doubling the number of buckets.
990 : */
991 : static void
992 6436 : RehashCatCache(CatCache *cp)
993 : {
994 : dlist_head *newbucket;
995 : int newnbuckets;
996 : int i;
997 :
998 6436 : elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
999 : cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);
1000 :
1001 : /* Allocate a new, larger, hash table. */
1002 6436 : newnbuckets = cp->cc_nbuckets * 2;
1003 6436 : newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
1004 :
1005 : /* Move all entries from old hash table to new. */
1006 578228 : for (i = 0; i < cp->cc_nbuckets; i++)
1007 : {
1008 : dlist_mutable_iter iter;
1009 :
1010 1721812 : dlist_foreach_modify(iter, &cp->cc_bucket[i])
1011 : {
1012 1150020 : CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
1013 1150020 : int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
1014 :
1015 1150020 : dlist_delete(iter.cur);
1016 1150020 : dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
1017 : }
1018 : }
1019 :
1020 : /* Switch to the new array. */
1021 6436 : pfree(cp->cc_bucket);
1022 6436 : cp->cc_nbuckets = newnbuckets;
1023 6436 : cp->cc_bucket = newbucket;
1024 6436 : }
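/*
 * Editor's note: because the bucket count stays a power of two, doubling
 * merely consults one more bit of each entry's stored hash_value, e.g.
 *
 *     HASH_INDEX(h, 256) == (h & 0xFF);
 *     HASH_INDEX(h, 512) == (h & 0x1FF);    (old bucket i splits into
 *                                            buckets i and i + 256)
 */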
1025 :
1026 : /*
1027 : * Enlarge a catcache's list storage, doubling the number of buckets.
1028 : */
1029 : static void
1030 1226 : RehashCatCacheLists(CatCache *cp)
1031 : {
1032 : dlist_head *newbucket;
1033 : int newnbuckets;
1034 : int i;
1035 :
1036 1226 : elog(DEBUG1, "rehashing catalog cache id %d for %s; %d lists, %d buckets",
1037 : cp->id, cp->cc_relname, cp->cc_nlist, cp->cc_nlbuckets);
1038 :
1039 : /* Allocate a new, larger, hash table. */
1040 1226 : newnbuckets = cp->cc_nlbuckets * 2;
1041 1226 : newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
1042 :
1043 : /* Move all entries from old hash table to new. */
1044 45866 : for (i = 0; i < cp->cc_nlbuckets; i++)
1045 : {
1046 : dlist_mutable_iter iter;
1047 :
1048 135146 : dlist_foreach_modify(iter, &cp->cc_lbucket[i])
1049 : {
1050 90506 : CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
1051 90506 : int hashIndex = HASH_INDEX(cl->hash_value, newnbuckets);
1052 :
1053 90506 : dlist_delete(iter.cur);
1054 90506 : dlist_push_head(&newbucket[hashIndex], &cl->cache_elem);
1055 : }
1056 : }
1057 :
1058 : /* Switch to the new array. */
1059 1226 : pfree(cp->cc_lbucket);
1060 1226 : cp->cc_nlbuckets = newnbuckets;
1061 1226 : cp->cc_lbucket = newbucket;
1062 1226 : }
1063 :
1064 : /*
1065 : * ConditionalCatalogCacheInitializeCache
1066 : *
1067 : * Call CatalogCacheInitializeCache() if not yet done.
1068 : */
1069 : pg_attribute_always_inline
1070 : static void
1071 107382094 : ConditionalCatalogCacheInitializeCache(CatCache *cache)
1072 : {
1073 : #ifdef USE_ASSERT_CHECKING
1074 : /*
1075 : * TypeCacheRelCallback() runs outside transactions and relies on TYPEOID
1076 : * for hashing. This isn't ideal. Since lookup_type_cache() both
1077 : * registers the callback and searches TYPEOID, reaching trouble likely
1078 : * requires OOM at an unlucky moment.
1079 : *
1080 : * InvalidateAttoptCacheCallback() runs outside transactions and likewise
1081 : * relies on ATTNUM. InitPostgres() initializes ATTNUM, so it's reliable.
1082 : */
1083 : if (!(cache->id == TYPEOID || cache->id == ATTNUM) ||
1084 : IsTransactionState())
1085 : AssertCouldGetRelation();
1086 : else
1087 : Assert(cache->cc_tupdesc != NULL);
1088 : #endif
1089 :
1090 107382094 : if (unlikely(cache->cc_tupdesc == NULL))
1091 805968 : CatalogCacheInitializeCache(cache);
1092 107382088 : }
1093 :
1094 : /*
1095 : * CatalogCacheInitializeCache
1096 : *
1097 : * This function does final initialization of a catcache: obtain the tuple
1098 : * descriptor and set up the hash and equality function links.
1099 : */
1100 : #ifdef CACHEDEBUG
1101 : #define CatalogCacheInitializeCache_DEBUG1 \
1102 : elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
1103 : cache->cc_reloid)
1104 :
1105 : #define CatalogCacheInitializeCache_DEBUG2 \
1106 : do { \
1107 : if (cache->cc_keyno[i] > 0) { \
1108 : elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
1109 : i+1, cache->cc_nkeys, cache->cc_keyno[i], \
1110 : TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \
1111 : } else { \
1112 : elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
1113 : i+1, cache->cc_nkeys, cache->cc_keyno[i]); \
1114 : } \
1115 : } while(0)
1116 : #else
1117 : #define CatalogCacheInitializeCache_DEBUG1
1118 : #define CatalogCacheInitializeCache_DEBUG2
1119 : #endif
1120 :
1121 : static void
1122 805968 : CatalogCacheInitializeCache(CatCache *cache)
1123 : {
1124 : Relation relation;
1125 : MemoryContext oldcxt;
1126 : TupleDesc tupdesc;
1127 : int i;
1128 :
1129 : CatalogCacheInitializeCache_DEBUG1;
1130 :
1131 805968 : relation = table_open(cache->cc_reloid, AccessShareLock);
1132 :
1133 : /*
1134 : * switch to the cache context so our allocations do not vanish at the end
1135 : * of a transaction
1136 : */
1137 : Assert(CacheMemoryContext != NULL);
1138 :
1139 805962 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1140 :
1141 : /*
1142 : * copy the relcache's tuple descriptor to permanent cache storage
1143 : */
1144 805962 : tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
1145 :
1146 : /*
1147 : * save the relation's name and relisshared flag, too (cc_relname is used
1148 : * only for debugging purposes)
1149 : */
1150 805962 : cache->cc_relname = pstrdup(RelationGetRelationName(relation));
1151 805962 : cache->cc_relisshared = RelationGetForm(relation)->relisshared;
1152 :
1153 : /*
1154 : * return to the caller's memory context and close the rel
1155 : */
1156 805962 : MemoryContextSwitchTo(oldcxt);
1157 :
1158 805962 : table_close(relation, AccessShareLock);
1159 :
1160 : CACHE_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
1161 : cache->cc_relname, cache->cc_nkeys);
1162 :
1163 : /*
1164 : * initialize cache's key information
1165 : */
1166 2079968 : for (i = 0; i < cache->cc_nkeys; ++i)
1167 : {
1168 : Oid keytype;
1169 : RegProcedure eqfunc;
1170 :
1171 : CatalogCacheInitializeCache_DEBUG2;
1172 :
1173 1274006 : if (cache->cc_keyno[i] > 0)
1174 : {
1175 1274006 : Form_pg_attribute attr = TupleDescAttr(tupdesc,
1176 1274006 : cache->cc_keyno[i] - 1);
1177 :
1178 1274006 : keytype = attr->atttypid;
1179 : /* cache key columns should always be NOT NULL */
1180 : Assert(attr->attnotnull);
1181 : }
1182 : else
1183 : {
1184 0 : if (cache->cc_keyno[i] < 0)
1185 0 : elog(FATAL, "sys attributes are not supported in caches");
1186 0 : keytype = OIDOID;
1187 : }
1188 :
1189 1274006 : GetCCHashEqFuncs(keytype,
1190 : &cache->cc_hashfunc[i],
1191 : &eqfunc,
1192 : &cache->cc_fastequal[i]);
1193 :
1194 : /*
1195 : * Do equality-function lookup (we assume this won't need a catalog
1196 : * lookup for any supported type)
1197 : */
1198 1274006 : fmgr_info_cxt(eqfunc,
1199 : &cache->cc_skey[i].sk_func,
1200 : CacheMemoryContext);
1201 :
1202 : /* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
1203 1274006 : cache->cc_skey[i].sk_attno = cache->cc_keyno[i];
1204 :
1205 : /* Fill in sk_strategy as well --- always standard equality */
1206 1274006 : cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
1207 1274006 : cache->cc_skey[i].sk_subtype = InvalidOid;
1208 : /* If a catcache key requires a collation, it must be C collation */
1209 1274006 : cache->cc_skey[i].sk_collation = C_COLLATION_OID;
1210 :
1211 : CACHE_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
1212 : cache->cc_relname, i, cache);
1213 : }
1214 :
1215 : /*
1216 : * mark this cache fully initialized
1217 : */
1218 805962 : cache->cc_tupdesc = tupdesc;
1219 805962 : }
1220 :
1221 : /*
1222 : * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
1223 : *
1224 : * One reason to call this routine is to ensure that the relcache has
1225 : * created entries for all the catalogs and indexes referenced by catcaches.
1226 : * Therefore, provide an option to open the index as well as fixing the
1227 : * cache itself. An exception is the indexes on pg_am, which we don't use
1228 : * (cf. IndexScanOK).
1229 : */
1230 : void
1231 358588 : InitCatCachePhase2(CatCache *cache, bool touch_index)
1232 : {
1233 358588 : ConditionalCatalogCacheInitializeCache(cache);
1234 :
1235 358586 : if (touch_index &&
1236 323820 : cache->id != AMOID &&
1237 320010 : cache->id != AMNAME)
1238 : {
1239 : Relation idesc;
1240 :
1241 : /*
1242 : * We must lock the underlying catalog before opening the index to
1243 : * avoid deadlock, since index_open could possibly result in reading
1244 : * this same catalog, and if anyone else is exclusive-locking this
1245 : * catalog and index they'll be doing it in that order.
1246 : */
1247 316200 : LockRelationOid(cache->cc_reloid, AccessShareLock);
1248 316200 : idesc = index_open(cache->cc_indexoid, AccessShareLock);
1249 :
1250 : /*
1251 : * While we've got the index open, let's check that it's unique (and
1252 : * not just deferrable-unique, thank you very much). This is just to
1253 : * catch thinkos in definitions of new catcaches, so we don't worry
1254 : * about the pg_am indexes not getting tested.
1255 : */
1256 : Assert(idesc->rd_index->indisunique &&
1257 : idesc->rd_index->indimmediate);
1258 :
1259 316200 : index_close(idesc, AccessShareLock);
1260 316200 : UnlockRelationOid(cache->cc_reloid, AccessShareLock);
1261 : }
1262 358586 : }
1263 :
1264 :
1265 : /*
1266 : * IndexScanOK
1267 : *
1268 : * This function checks for tuples that will be fetched by
1269 : * IndexSupportInitialize() during relcache initialization for
1270 : * certain system indexes that support critical syscaches.
1271 : * We can't use an indexscan to fetch these, else we'll get into
1272 : * infinite recursion. A plain heap scan will work, however.
1273 : * Once we have completed relcache initialization (signaled by
1274 : * criticalRelcachesBuilt), we don't have to worry anymore.
1275 : *
1276 : * Similarly, during backend startup we have to be able to use the
1277 : * pg_authid, pg_auth_members and pg_database syscaches for
1278 : * authentication even if we don't yet have relcache entries for those
1279 : * catalogs' indexes.
1280 : */
1281 : static bool
1282 7022518 : IndexScanOK(CatCache *cache)
1283 : {
1284 7022518 : switch (cache->id)
1285 : {
1286 742502 : case INDEXRELID:
1287 :
1288 : /*
1289 : * Rather than tracking exactly which indexes have to be loaded
1290 : * before we can use indexscans (which changes from time to time),
1291 : * just force all pg_index searches to be heap scans until we've
1292 : * built the critical relcaches.
1293 : */
1294 742502 : if (!criticalRelcachesBuilt)
1295 39494 : return false;
1296 703008 : break;
1297 :
1298 63740 : case AMOID:
1299 : case AMNAME:
1300 :
1301 : /*
1302 : * Always do heap scans in pg_am, because it's so small there's
1303 : * not much point in an indexscan anyway. We *must* do this when
1304 : * initially building critical relcache entries, but we might as
1305 : * well just always do it.
1306 : */
1307 63740 : return false;
1308 :
1309 115166 : case AUTHNAME:
1310 : case AUTHOID:
1311 : case AUTHMEMMEMROLE:
1312 : case DATABASEOID:
1313 :
1314 : /*
1315 : * Protect authentication lookups occurring before relcache has
1316 : * collected entries for shared indexes.
1317 : */
1318 115166 : if (!criticalSharedRelcachesBuilt)
1319 4900 : return false;
1320 110266 : break;
1321 :
1322 6101110 : default:
1323 6101110 : break;
1324 : }
1325 :
1326 : /* Normal case, allow index scan */
1327 6914384 : return true;
1328 : }
1329 :
1330 : /*
1331 : * SearchCatCache
1332 : *
1333 : * This call searches a system cache for a tuple, opening the relation
1334 : * if necessary (on the first access to a particular cache).
1335 : *
1336 : * The result is NULL if not found, or a pointer to a HeapTuple in
1337 : * the cache. The caller must not modify the tuple, and must call
1338 : * ReleaseCatCache() when done with it.
1339 : *
1340 : * The search key values should be expressed as Datums of the key columns'
1341 : * datatype(s). (Pass zeroes for any unused parameters.) As a special
1342 : * exception, the passed-in key for a NAME column can be just a C string;
1343 : * the caller need not go to the trouble of converting it to a fully
1344 : * null-padded NAME.
1345 : */
1346 : HeapTuple
1347 5827584 : SearchCatCache(CatCache *cache,
1348 : Datum v1,
1349 : Datum v2,
1350 : Datum v3,
1351 : Datum v4)
1352 : {
1353 5827584 : return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4);
1354 : }
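/*
 * Usage sketch (editor's note): a lookup exercising the NAME-as-C-string
 * exception documented above, with the mandatory release. "relname_cache"
 * is a hypothetical one-key NAME cache; real code goes through the syscache
 * wrappers such as SearchSysCache1().
 *
 *     HeapTuple tup = SearchCatCache1(relname_cache,
 *                                     CStringGetDatum("pg_class"));
 *
 *     if (HeapTupleIsValid(tup))
 *     {
 *         ... read-only use of tup ...
 *         ReleaseCatCache(tup);
 *     }
 */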
1355 :
1356 :
1357 : /*
1358 : * The SearchCatCacheN() functions are SearchCatCache() versions for a
1359 : * specific number of arguments. The compiler can inline the body and unroll
1360 : * loops, making them a bit faster than SearchCatCache().
1361 : */
1362 :
1363 : HeapTuple
1364 73448858 : SearchCatCache1(CatCache *cache,
1365 : Datum v1)
1366 : {
1367 73448858 : return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0);
1368 : }
1369 :
1370 :
1371 : HeapTuple
1372 6173040 : SearchCatCache2(CatCache *cache,
1373 : Datum v1, Datum v2)
1374 : {
1375 6173040 : return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0);
1376 : }
1377 :
1378 :
1379 : HeapTuple
1380 5899148 : SearchCatCache3(CatCache *cache,
1381 : Datum v1, Datum v2, Datum v3)
1382 : {
1383 5899148 : return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0);
1384 : }
1385 :
1386 :
1387 : HeapTuple
1388 4575832 : SearchCatCache4(CatCache *cache,
1389 : Datum v1, Datum v2, Datum v3, Datum v4)
1390 : {
1391 4575832 : return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
1392 : }
1393 :
1394 : /*
1395 : * Work-horse for SearchCatCache/SearchCatCacheN.
1396 : */
1397 : static inline HeapTuple
1398 95924462 : SearchCatCacheInternal(CatCache *cache,
1399 : int nkeys,
1400 : Datum v1,
1401 : Datum v2,
1402 : Datum v3,
1403 : Datum v4)
1404 : {
1405 : Datum arguments[CATCACHE_MAXKEYS];
1406 : uint32 hashValue;
1407 : Index hashIndex;
1408 : dlist_iter iter;
1409 : dlist_head *bucket;
1410 : CatCTup *ct;
1411 :
1412 : Assert(cache->cc_nkeys == nkeys);
1413 :
1414 : /*
1415 : * one-time startup overhead for each cache
1416 : */
1417 95924462 : ConditionalCatalogCacheInitializeCache(cache);
1418 :
1419 : #ifdef CATCACHE_STATS
1420 : cache->cc_searches++;
1421 : #endif
1422 :
1423 : /* Initialize local parameter array */
1424 95924458 : arguments[0] = v1;
1425 95924458 : arguments[1] = v2;
1426 95924458 : arguments[2] = v3;
1427 95924458 : arguments[3] = v4;
1428 :
1429 : /*
1430 : * find the hash bucket in which to look for the tuple
1431 : */
1432 95924458 : hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
1433 95924458 : hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1434 :
1435 : /*
1436 : * scan the hash bucket until we find a match or exhaust our tuples
1437 : *
1438 : * Note: it's okay to use dlist_foreach here, even though we modify the
1439 : * dlist within the loop, because we don't continue the loop afterwards.
1440 : */
1441 95924458 : bucket = &cache->cc_bucket[hashIndex];
1442 102892310 : dlist_foreach(iter, bucket)
1443 : {
1444 96188926 : ct = dlist_container(CatCTup, cache_elem, iter.cur);
1445 :
1446 96188926 : if (ct->dead)
1447 0 : continue; /* ignore dead entries */
1448 :
1449 96188926 : if (ct->hash_value != hashValue)
1450 6967852 : continue; /* quickly skip entry if wrong hash val */
1451 :
1452 89221074 : if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
1453 0 : continue;
1454 :
1455 : /*
1456 : * We found a match in the cache. Move it to the front of the list
1457 : * for its hashbucket, in order to speed subsequent searches. (The
1458 : * most frequently accessed elements in any hashbucket will tend to be
1459 : * near the front of the hashbucket's list.)
1460 : */
1461 89221074 : dlist_move_head(bucket, &ct->cache_elem);
1462 :
1463 : /*
1464 : * If it's a positive entry, bump its refcount and return it. If it's
1465 : * negative, we can report failure to the caller.
1466 : */
1467 89221074 : if (!ct->negative)
1468 : {
1469 84857212 : ResourceOwnerEnlarge(CurrentResourceOwner);
1470 84857212 : ct->refcount++;
1471 84857212 : ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1472 :
1473 : CACHE_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
1474 : cache->cc_relname, hashIndex);
1475 :
1476 : #ifdef CATCACHE_STATS
1477 : cache->cc_hits++;
1478 : #endif
1479 :
1480 84857212 : return &ct->tuple;
1481 : }
1482 : else
1483 : {
1484 : CACHE_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
1485 : cache->cc_relname, hashIndex);
1486 :
1487 : #ifdef CATCACHE_STATS
1488 : cache->cc_neg_hits++;
1489 : #endif
1490 :
1491 4363862 : return NULL;
1492 : }
1493 : }
1494 :
1495 6703384 : return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4);
1496 : }
1497 :
1498 : /*
1499 : * Search the actual catalogs, rather than the cache.
1500 : *
1501 : * This is kept separate from SearchCatCacheInternal() to keep the fast-path
1502 : * as small as possible. To avoid that effort being undone by a helpful
1503 : * compiler, try to explicitly forbid inlining.
1504 : */
1505 : static pg_noinline HeapTuple
1506 6703384 : SearchCatCacheMiss(CatCache *cache,
1507 : int nkeys,
1508 : uint32 hashValue,
1509 : Index hashIndex,
1510 : Datum v1,
1511 : Datum v2,
1512 : Datum v3,
1513 : Datum v4)
1514 : {
1515 : ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1516 : Relation relation;
1517 : SysScanDesc scandesc;
1518 : HeapTuple ntp;
1519 : CatCTup *ct;
1520 : bool stale;
1521 : Datum arguments[CATCACHE_MAXKEYS];
1522 :
1523 : /* Initialize local parameter array */
1524 6703384 : arguments[0] = v1;
1525 6703384 : arguments[1] = v2;
1526 6703384 : arguments[2] = v3;
1527 6703384 : arguments[3] = v4;
1528 :
1529 : /*
1530 : * Tuple was not found in cache, so we have to try to retrieve it directly
1531 : * from the relation. If found, we will add it to the cache; if not
1532 : * found, we will add a negative cache entry instead.
1533 : *
1534 : * NOTE: it is possible for recursive cache lookups to occur while reading
1535 : * the relation --- for example, due to shared-cache-inval messages being
1536 : * processed during table_open(). This is OK. It's even possible for one
1537 : * of those lookups to find and enter the very same tuple we are trying to
1538 : * fetch here. If that happens, we will enter a second copy of the tuple
1539 : * into the cache. The first copy will never be referenced again, and
1540 : * will eventually age out of the cache, so there's no functional problem.
1541 : * This case is rare enough that it's not worth expending extra cycles to
1542 : * detect.
1543 : *
1544 : * Another case, which we *must* handle, is that the tuple could become
1545 : * outdated during CatalogCacheCreateEntry's attempt to detoast it (since
1546 : * AcceptInvalidationMessages can run during TOAST table access). We do
1547 : * not want to return already-stale catcache entries, so we loop around
1548 : * and do the table scan again if that happens.
1549 : */
1550 6703384 : relation = table_open(cache->cc_reloid, AccessShareLock);
1551 :
1552 : /*
1553 : * OK, we need to do a lookup in the relation; copy the scankey and fill
1554 : * out any per-call fields.
1555 : */
1556 6703384 : memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
1557 6703384 : cur_skey[0].sk_argument = v1;
1558 6703384 : cur_skey[1].sk_argument = v2;
1559 6703384 : cur_skey[2].sk_argument = v3;
1560 6703384 : cur_skey[3].sk_argument = v4;
1561 :
1562 : do
1563 : {
1564 6703384 : scandesc = systable_beginscan(relation,
1565 : cache->cc_indexoid,
1566 6703384 : IndexScanOK(cache),
1567 : NULL,
1568 : nkeys,
1569 : cur_skey);
1570 :
1571 6703384 : ct = NULL;
1572 6703384 : stale = false;
1573 :
1574 6703384 : while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
1575 : {
1576 4937778 : ct = CatalogCacheCreateEntry(cache, ntp, NULL,
1577 : hashValue, hashIndex);
1578 : /* upon failure, we must start the scan over */
1579 4937778 : if (ct == NULL)
1580 : {
1581 0 : stale = true;
1582 0 : break;
1583 : }
1584 : /* immediately set the refcount to 1 */
1585 4937778 : ResourceOwnerEnlarge(CurrentResourceOwner);
1586 4937778 : ct->refcount++;
1587 4937778 : ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
1588 4937778 : break; /* assume only one match */
1589 : }
1590 :
1591 6703382 : systable_endscan(scandesc);
1592 6703382 : } while (stale);
1593 :
1594 6703382 : table_close(relation, AccessShareLock);
1595 :
1596 : /*
1597 : * If tuple was not found, we need to build a negative cache entry
1598 : * containing a fake tuple. The fake tuple has the correct key columns,
1599 : * but nulls everywhere else.
1600 : *
1601 : * In bootstrap mode, we don't build negative entries, because the cache
1602 : * invalidation mechanism isn't alive and can't clear them if the tuple
1603 : * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
1604 : * cache inval for that.)
1605 : */
1606 6703382 : if (ct == NULL)
1607 : {
1608 1765604 : if (IsBootstrapProcessingMode())
1609 54000 : return NULL;
1610 :
1611 1711604 : ct = CatalogCacheCreateEntry(cache, NULL, arguments,
1612 : hashValue, hashIndex);
1613 :
1614 : /* Creating a negative cache entry shouldn't fail */
1615 : Assert(ct != NULL);
1616 :
1617 : CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1618 : cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1619 : CACHE_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
1620 : cache->cc_relname, hashIndex);
1621 :
1622 : /*
1623 : * We are not returning the negative entry to the caller, so leave its
1624 : * refcount zero.
1625 : */
1626 :
1627 1711604 : return NULL;
1628 : }
1629 :
1630 : CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
1631 : cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
1632 : CACHE_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
1633 : cache->cc_relname, hashIndex);
1634 :
1635 : #ifdef CATCACHE_STATS
1636 : cache->cc_newloads++;
1637 : #endif
1638 :
1639 4937778 : return &ct->tuple;
1640 : }
1641 :
1642 : /*
1643 : * ReleaseCatCache
1644 : *
1645 : * Decrement the reference count of a catcache entry (releasing the
1646 : * hold grabbed by a successful SearchCatCache).
1647 : *
1648 : * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
1649 : * will be freed as soon as their refcount goes to zero. In combination
1650 : * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
1651 : * to catch references to already-released catcache entries.
1652 : */
1653 : void
1654 89784096 : ReleaseCatCache(HeapTuple tuple)
1655 : {
1656 89784096 : ReleaseCatCacheWithOwner(tuple, CurrentResourceOwner);
1657 89784096 : }
1658 :
1659 : static void
1660 89794990 : ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner)
1661 : {
1662 89794990 : CatCTup *ct = (CatCTup *) (((char *) tuple) -
1663 : offsetof(CatCTup, tuple));
1664 :
1665 : /* Safety checks to ensure we were handed a cache entry */
1666 : Assert(ct->ct_magic == CT_MAGIC);
1667 : Assert(ct->refcount > 0);
1668 :
1669 89794990 : ct->refcount--;
1670 89794990 : if (resowner)
1671 89784096 : ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);
1672 :
1673 89794990 : if (
1674 : #ifndef CATCACHE_FORCE_RELEASE
1675 89794990 : ct->dead &&
1676 : #endif
1677 1628 : ct->refcount == 0 &&
1678 1504 : (ct->c_list == NULL || ct->c_list->refcount == 0))
1679 1504 : CatCacheRemoveCTup(ct->my_cache, ct);
1680 89794990 : }
1681 :
1682 :
1683 : /*
1684 : * GetCatCacheHashValue
1685 : *
1686 : * Compute the hash value for a given set of search keys.
1687 : *
1688 : * The reason for exposing this as part of the API is that the hash value is
1689 : * exposed in cache invalidation operations, so there are places outside the
1690 : * catcache code that need to be able to compute the hash values.
1691 : */
1692 : uint32
1693 1120606 : GetCatCacheHashValue(CatCache *cache,
1694 : Datum v1,
1695 : Datum v2,
1696 : Datum v3,
1697 : Datum v4)
1698 : {
1699 : /*
1700 : * one-time startup overhead for each cache
1701 : */
1702 1120606 : ConditionalCatalogCacheInitializeCache(cache);
1703 :
1704 : /*
1705 : * calculate the hash value
1706 : */
1707 1120606 : return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4);
1708 : }
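/*
 * Usage sketch (editor's note, hypothetical): code outside catcache.c can
 * compute the hash for a key it plans to match against future invalidation
 * messages, e.g.
 *
 *     uint32 hv = GetCatCacheHashValue(cache, ObjectIdGetDatum(relid),
 *                                      0, 0, 0);
 *     ... stash hv; compare it against hashValue in a syscache callback ...
 */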
1709 :
1710 :
1711 : /*
1712 : * SearchCatCacheList
1713 : *
1714 : * Generate a list of all tuples matching a partial key (that is,
1715 : * a key specifying just the first K of the cache's N key columns).
1716 : *
1717 : * It doesn't make any sense to specify all of the cache's key columns
1718 : * here: since the key is unique, there could be at most one match, so
1719 : * you ought to use SearchCatCache() instead. Hence this function takes
1720 : * one fewer Datum argument than SearchCatCache() does.
1721 : *
1722 : * The caller must not modify the list object or the pointed-to tuples,
1723 : * and must call ReleaseCatCacheList() when done with the list.
1724 : */
1725 : CatCList *
1726 3951280 : SearchCatCacheList(CatCache *cache,
1727 : int nkeys,
1728 : Datum v1,
1729 : Datum v2,
1730 : Datum v3)
1731 : {
1732 3951280 : Datum v4 = 0; /* dummy last-column value */
1733 : Datum arguments[CATCACHE_MAXKEYS];
1734 : uint32 lHashValue;
1735 : Index lHashIndex;
1736 : dlist_iter iter;
1737 : dlist_head *lbucket;
1738 : CatCList *cl;
1739 : CatCTup *ct;
1740 : List *volatile ctlist;
1741 : ListCell *ctlist_item;
1742 : int nmembers;
1743 : bool ordered;
1744 : HeapTuple ntp;
1745 : MemoryContext oldcxt;
1746 : int i;
1747 : CatCInProgress *save_in_progress;
1748 : CatCInProgress in_progress_ent;
1749 :
1750 : /*
1751 : * one-time startup overhead for each cache
1752 : */
1753 3951280 : ConditionalCatalogCacheInitializeCache(cache);
1754 :
1755 : Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1756 :
1757 : #ifdef CATCACHE_STATS
1758 : cache->cc_lsearches++;
1759 : #endif
1760 :
1761 : /* Initialize local parameter array */
1762 3951280 : arguments[0] = v1;
1763 3951280 : arguments[1] = v2;
1764 3951280 : arguments[2] = v3;
1765 3951280 : arguments[3] = v4;
1766 :
1767 : /*
1768 : * If we haven't previously done a list search in this cache, create the
1769 : * bucket header array; otherwise, consider whether it's time to enlarge
1770 : * it.
1771 : */
1772 3951280 : if (cache->cc_lbucket == NULL)
1773 : {
1774 : /* Arbitrary initial size --- must be a power of 2 */
1775 42448 : int nbuckets = 16;
1776 :
1777 42448 : cache->cc_lbucket = (dlist_head *)
1778 42448 : MemoryContextAllocZero(CacheMemoryContext,
1779 : nbuckets * sizeof(dlist_head));
1780 : /* Don't set cc_nlbuckets if we get OOM allocating cc_lbucket */
1781 42448 : cache->cc_nlbuckets = nbuckets;
1782 : }
1783 : else
1784 : {
1785 : /*
1786 : * If the hash table has become too full, enlarge the buckets array.
1787 : * Quite arbitrarily, we enlarge when fill factor > 2.
1788 : */
1789 3908832 : if (cache->cc_nlist > cache->cc_nlbuckets * 2)
1790 1226 : RehashCatCacheLists(cache);
1791 : }
1792 :
1793 : /*
1794 : * Find the hash bucket in which to look for the CatCList.
1795 : */
1796 3951280 : lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
1797 3951280 : lHashIndex = HASH_INDEX(lHashValue, cache->cc_nlbuckets);
1798 :
1799 : /*
1800 : * scan the items until we find a match or exhaust our list
1801 : *
1802 : * Note: it's okay to use dlist_foreach here, even though we modify the
1803 : * dlist within the loop, because we don't continue the loop afterwards.
1804 : */
1805 3951280 : lbucket = &cache->cc_lbucket[lHashIndex];
1806 4336716 : dlist_foreach(iter, lbucket)
1807 : {
1808 4017604 : cl = dlist_container(CatCList, cache_elem, iter.cur);
1809 :
1810 4017604 : if (cl->dead)
1811 0 : continue; /* ignore dead entries */
1812 :
1813 4017604 : if (cl->hash_value != lHashValue)
1814 385436 : continue; /* quickly skip entry if wrong hash val */
1815 :
1816 : /*
1817 : * see if the cached list matches our key.
1818 : */
1819 3632168 : if (cl->nkeys != nkeys)
1820 0 : continue;
1821 :
1822 3632168 : if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments))
1823 0 : continue;
1824 :
1825 : /*
1826 : * We found a matching list. Move it to the front of its hash bucket's
1827 : * list, so as to speed subsequent searches. (We do not
1828 : * move the members to the fronts of their hashbucket lists, however,
1829 : * since there's no point in that unless they are searched for
1830 : * individually.)
1831 : */
1832 3632168 : dlist_move_head(lbucket, &cl->cache_elem);
1833 :
1834 : /* Bump the list's refcount and return it */
1835 3632168 : ResourceOwnerEnlarge(CurrentResourceOwner);
1836 3632168 : cl->refcount++;
1837 3632168 : ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1838 :
1839 : CACHE_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1840 : cache->cc_relname);
1841 :
1842 : #ifdef CATCACHE_STATS
1843 : cache->cc_lhits++;
1844 : #endif
1845 :
1846 3632168 : return cl;
1847 : }
1848 :
1849 : /*
1850 : * List was not found in cache, so we have to build it by reading the
1851 : * relation. For each matching tuple found in the relation, use an
1852 : * existing cache entry if possible, else build a new one.
1853 : *
1854 : * We have to bump the member refcounts temporarily to ensure they won't
1855 : * get dropped from the cache while loading other members. We use a PG_TRY
1856 : * block to ensure we can undo those refcounts if we get an error before
1857 : * we finish constructing the CatCList. ctlist must be valid throughout
1858 : * the PG_TRY block.
1859 : */
1860 319112 : ctlist = NIL;
1861 :
1862 : /*
1863 : * Cache invalidation can happen while we're building the list.
1864 : * CatalogCacheCreateEntry() handles concurrent invalidation of individual
1865 : * tuples, but it's also possible that a new entry is concurrently added
1866 : * that should be part of the list we're building. Register an
1867 : * "in-progress" entry that will receive the invalidation, until we have
1868 : * built the final list entry.
1869 : */
1870 319112 : save_in_progress = catcache_in_progress_stack;
1871 319112 : in_progress_ent.next = catcache_in_progress_stack;
1872 319112 : in_progress_ent.cache = cache;
1873 319112 : in_progress_ent.hash_value = lHashValue;
1874 319112 : in_progress_ent.list = true;
1875 319112 : in_progress_ent.dead = false;
1876 319112 : catcache_in_progress_stack = &in_progress_ent;
1877 :
1878 319112 : PG_TRY();
1879 : {
1880 : ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1881 : Relation relation;
1882 : SysScanDesc scandesc;
1883 319112 : bool first_iter = true;
1884 :
1885 319112 : relation = table_open(cache->cc_reloid, AccessShareLock);
1886 :
1887 : /*
1888 : * OK, we need to do a lookup in the relation; copy the scankey and
1889 : * fill out any per-call fields.
1890 : */
1891 319112 : memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
1892 319112 : cur_skey[0].sk_argument = v1;
1893 319112 : cur_skey[1].sk_argument = v2;
1894 319112 : cur_skey[2].sk_argument = v3;
1895 319112 : cur_skey[3].sk_argument = v4;
1896 :
1897 : /*
1898 : * Scan the table for matching entries. If an invalidation arrives
1899 : * mid-build, we will loop back here to retry.
1900 : */
1901 : do
1902 : {
1903 : /*
1904 : * If we are retrying, release refcounts on any items created on
1905 : * the previous iteration. We dare not try to free them if
1906 : * they're now unreferenced, since an error while doing that would
1907 : * result in the PG_CATCH below doing extra refcount decrements.
1908 : * Besides, we'll likely re-adopt those items in the next
1909 : * iteration, so it's not worth complicating matters to try to get
1910 : * rid of them.
1911 : */
1912 319136 : foreach(ctlist_item, ctlist)
1913 : {
1914 2 : ct = (CatCTup *) lfirst(ctlist_item);
1915 : Assert(ct->c_list == NULL);
1916 : Assert(ct->refcount > 0);
1917 2 : ct->refcount--;
1918 : }
1919 : /* Reset ctlist in preparation for new try */
1920 319134 : ctlist = NIL;
1921 319134 : in_progress_ent.dead = false;
1922 :
1923 638268 : scandesc = systable_beginscan(relation,
1924 : cache->cc_indexoid,
1925 319134 : IndexScanOK(cache),
1926 : NULL,
1927 : nkeys,
1928 : cur_skey);
1929 :
1930 : /* The list will be ordered iff we are doing an index scan */
1931 319134 : ordered = (scandesc->irel != NULL);
1932 :
1933 : /* Injection point to help test the recursive invalidation case */
1934 319134 : if (first_iter)
1935 : {
1936 319112 : INJECTION_POINT("catcache-list-miss-systable-scan-started", NULL);
1937 319112 : first_iter = false;
1938 : }
1939 :
1940 1305758 : while (HeapTupleIsValid(ntp = systable_getnext(scandesc)) &&
1941 986644 : !in_progress_ent.dead)
1942 : {
1943 : uint32 hashValue;
1944 : Index hashIndex;
1945 986624 : bool found = false;
1946 : dlist_head *bucket;
1947 :
1948 : /*
1949 : * See if there's an entry for this tuple already.
1950 : */
1951 986624 : ct = NULL;
1952 986624 : hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
1953 986624 : hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1954 :
1955 986624 : bucket = &cache->cc_bucket[hashIndex];
1956 1356884 : dlist_foreach(iter, bucket)
1957 : {
1958 519980 : ct = dlist_container(CatCTup, cache_elem, iter.cur);
1959 :
1960 519980 : if (ct->dead || ct->negative)
1961 1062 : continue; /* ignore dead and negative entries */
1962 :
1963 518918 : if (ct->hash_value != hashValue)
1964 350784 : continue; /* quickly skip entry if wrong hash val */
1965 :
1966 168134 : if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1967 0 : continue; /* not same tuple */
1968 :
1969 : /*
1970 : * Found a match, but can't use it if it belongs to
1971 : * another list already
1972 : */
1973 168134 : if (ct->c_list)
1974 18414 : continue;
1975 :
1976 149720 : found = true;
1977 149720 : break; /* A-OK */
1978 : }
1979 :
1980 986624 : if (!found)
1981 : {
1982 : /* We didn't find a usable entry, so make a new one */
1983 836904 : ct = CatalogCacheCreateEntry(cache, ntp, NULL,
1984 : hashValue, hashIndex);
1985 :
1986 : /* upon failure, we must start the scan over */
1987 836904 : if (ct == NULL)
1988 : {
1989 0 : in_progress_ent.dead = true;
1990 0 : break;
1991 : }
1992 : }
1993 :
1994 : /* Careful here: add entry to ctlist, then bump its refcount */
1995 : /* This way leaves state correct if lappend runs out of memory */
1996 986624 : ctlist = lappend(ctlist, ct);
1997 986624 : ct->refcount++;
1998 : }
1999 :
2000 319134 : systable_endscan(scandesc);
2001 319134 : } while (in_progress_ent.dead);
2002 :
2003 319112 : table_close(relation, AccessShareLock);
2004 :
2005 : /* Make sure the resource owner has room to remember this entry. */
2006 319112 : ResourceOwnerEnlarge(CurrentResourceOwner);
2007 :
2008 : /* Now we can build the CatCList entry. */
2009 319112 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2010 319112 : nmembers = list_length(ctlist);
2011 : cl = (CatCList *)
2012 319112 : palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
2013 :
2014 : /* Extract key values */
2015 319112 : CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno,
2016 319112 : arguments, cl->keys);
2017 319112 : MemoryContextSwitchTo(oldcxt);
2018 :
2019 : /*
2020 : * We are now past the last thing that could trigger an elog before we
2021 : * have finished building the CatCList and remembering it in the
2022 : * resource owner. So it's OK to fall out of the PG_TRY, and indeed
2023 : * we'd better do so before we start marking the members as belonging
2024 : * to the list.
2025 : */
2026 : }
2027 0 : PG_CATCH();
2028 : {
2029 : Assert(catcache_in_progress_stack == &in_progress_ent);
2030 0 : catcache_in_progress_stack = save_in_progress;
2031 :
2032 0 : foreach(ctlist_item, ctlist)
2033 : {
2034 0 : ct = (CatCTup *) lfirst(ctlist_item);
2035 : Assert(ct->c_list == NULL);
2036 : Assert(ct->refcount > 0);
2037 0 : ct->refcount--;
2038 0 : if (
2039 : #ifndef CATCACHE_FORCE_RELEASE
2040 0 : ct->dead &&
2041 : #endif
2042 0 : ct->refcount == 0 &&
2043 0 : (ct->c_list == NULL || ct->c_list->refcount == 0))
2044 0 : CatCacheRemoveCTup(cache, ct);
2045 : }
2046 :
2047 0 : PG_RE_THROW();
2048 : }
2049 319112 : PG_END_TRY();
2050 : Assert(catcache_in_progress_stack == &in_progress_ent);
2051 319112 : catcache_in_progress_stack = save_in_progress;
2052 :
2053 319112 : cl->cl_magic = CL_MAGIC;
2054 319112 : cl->my_cache = cache;
2055 319112 : cl->refcount = 0; /* for the moment */
2056 319112 : cl->dead = false;
2057 319112 : cl->ordered = ordered;
2058 319112 : cl->nkeys = nkeys;
2059 319112 : cl->hash_value = lHashValue;
2060 319112 : cl->n_members = nmembers;
2061 :
2062 319112 : i = 0;
2063 1305734 : foreach(ctlist_item, ctlist)
2064 : {
2065 986622 : cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
2066 : Assert(ct->c_list == NULL);
2067 986622 : ct->c_list = cl;
2068 : /* release the temporary refcount on the member */
2069 : Assert(ct->refcount > 0);
2070 986622 : ct->refcount--;
2071 : /* mark list dead if any members already dead */
2072 986622 : if (ct->dead)
2073 0 : cl->dead = true;
2074 : }
2075 : Assert(i == nmembers);
2076 :
2077 : /*
2078 : * Add the CatCList to the appropriate bucket, and count it.
2079 : */
2080 319112 : dlist_push_head(lbucket, &cl->cache_elem);
2081 :
2082 319112 : cache->cc_nlist++;
2083 :
2084 : /* Finally, bump the list's refcount and return it */
2085 319112 : cl->refcount++;
2086 319112 : ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
2087 :
2088 : CACHE_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
2089 : cache->cc_relname, nmembers);
2090 :
2091 319112 : return cl;
2092 : }
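
A caller-side sketch of the partial-key contract described above, assuming a hypothetical multi-key cache searched on just its first key column; the members are borrowed HeapTuples and the list must be released when done:

    CatCList   *catlist;
    int         i;

    catlist = SearchCatCacheList(cache, 1, ObjectIdGetDatum(some_oid), 0, 0);
    for (i = 0; i < catlist->n_members; i++)
    {
        HeapTuple   tup = &catlist->members[i]->tuple;

        /* ... read-only use of tup; do not modify or free ... */
    }
    ReleaseCatCacheList(catlist);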
2093 :
2094 : /*
2095 : * ReleaseCatCacheList
2096 : *
2097 : * Decrement the reference count of a catcache list.
2098 : */
2099 : void
2100 3951244 : ReleaseCatCacheList(CatCList *list)
2101 : {
2102 3951244 : ReleaseCatCacheListWithOwner(list, CurrentResourceOwner);
2103 3951244 : }
2104 :
2105 : static void
2106 3951280 : ReleaseCatCacheListWithOwner(CatCList *list, ResourceOwner resowner)
2107 : {
2108 : /* Safety checks to ensure we were handed a cache entry */
2109 : Assert(list->cl_magic == CL_MAGIC);
2110 : Assert(list->refcount > 0);
2111 3951280 : list->refcount--;
2112 3951280 : if (resowner)
2113 3951244 : ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
2114 :
2115 3951280 : if (
2116 : #ifndef CATCACHE_FORCE_RELEASE
2117 3951280 : list->dead &&
2118 : #endif
2119 6 : list->refcount == 0)
2120 6 : CatCacheRemoveCList(list->my_cache, list);
2121 3951280 : }
2122 :
2123 :
2124 : /*
2125 : * CatalogCacheCreateEntry
2126 : * Create a new CatCTup entry, copying the given HeapTuple and other
2127 : * supplied data into it. The new entry initially has refcount 0.
2128 : *
2129 : * To create a normal cache entry, ntp must be the HeapTuple just fetched
2130 : * from scandesc, and "arguments" is not used. To create a negative cache
2131 : * entry, pass NULL for ntp; then "arguments" is the cache keys to use.
2132 : * In either case, hashValue/hashIndex are the hash values computed from
2133 : * the cache keys.
2134 : *
2135 : * Returns NULL if we attempt to detoast the tuple and observe that it
2136 : * became stale. (This cannot happen for a negative entry.) Caller must
2137 : * retry the tuple lookup in that case.
2138 : */
2139 : static CatCTup *
2140 7486286 : CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
2141 : uint32 hashValue, Index hashIndex)
2142 : {
2143 : CatCTup *ct;
2144 : MemoryContext oldcxt;
2145 :
2146 7486286 : if (ntp)
2147 : {
2148 : int i;
2149 5774682 : HeapTuple dtp = NULL;
2150 :
2151 : /*
2152 : * The invalidation of the in-progress entry essentially never happens
2153 : * during our regression tests, and there's no easy way to force it to
2154 : * fail for testing purposes. To ensure we have test coverage for the
2155 : * retry paths in our callers, make debug builds randomly fail about
2156 : * 0.1% of the time through this code path, even when there are no
2157 : * toasted fields.
2158 : */
2159 : #ifdef USE_ASSERT_CHECKING
2160 : if (pg_prng_uint32(&pg_global_prng_state) <= (PG_UINT32_MAX / 1000))
2161 : return NULL;
2162 : #endif
2163 :
2164 : /*
2165 : * If there are any out-of-line toasted fields in the tuple, expand
2166 : * them in-line. This saves cycles during later use of the catcache
2167 : * entry, and also protects us against the possibility of the toast
2168 : * tuples being freed before we attempt to fetch them, in case of
2169 : * something using a slightly stale catcache entry.
2170 : */
2171 5774682 : if (HeapTupleHasExternal(ntp))
2172 : {
2173 : CatCInProgress *save_in_progress;
2174 : CatCInProgress in_progress_ent;
2175 :
2176 : /*
2177 : * The tuple could become stale while we are doing toast table
2178 : * access (since AcceptInvalidationMessages can run then). The
2179 : * invalidation will mark our in-progress entry as dead.
2180 : */
2181 4090 : save_in_progress = catcache_in_progress_stack;
2182 4090 : in_progress_ent.next = catcache_in_progress_stack;
2183 4090 : in_progress_ent.cache = cache;
2184 4090 : in_progress_ent.hash_value = hashValue;
2185 4090 : in_progress_ent.list = false;
2186 4090 : in_progress_ent.dead = false;
2187 4090 : catcache_in_progress_stack = &in_progress_ent;
2188 :
2189 4090 : PG_TRY();
2190 : {
2191 4090 : dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
2192 : }
2193 0 : PG_FINALLY();
2194 : {
2195 : Assert(catcache_in_progress_stack == &in_progress_ent);
2196 4090 : catcache_in_progress_stack = save_in_progress;
2197 : }
2198 4090 : PG_END_TRY();
2199 :
2200 4090 : if (in_progress_ent.dead)
2201 : {
2202 0 : heap_freetuple(dtp);
2203 0 : return NULL;
2204 : }
2205 : }
2206 : else
2207 5770592 : dtp = ntp;
2208 :
2209 : /* Allocate memory for CatCTup and the cached tuple in one go */
2210 5774682 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2211 :
2212 11549364 : ct = (CatCTup *) palloc(sizeof(CatCTup) +
2213 5774682 : MAXIMUM_ALIGNOF + dtp->t_len);
2214 5774682 : ct->tuple.t_len = dtp->t_len;
2215 5774682 : ct->tuple.t_self = dtp->t_self;
2216 5774682 : ct->tuple.t_tableOid = dtp->t_tableOid;
2217 5774682 : ct->tuple.t_data = (HeapTupleHeader)
2218 5774682 : MAXALIGN(((char *) ct) + sizeof(CatCTup));
2219 : /* copy tuple contents */
2220 5774682 : memcpy((char *) ct->tuple.t_data,
2221 5774682 : (const char *) dtp->t_data,
2222 5774682 : dtp->t_len);
2223 5774682 : MemoryContextSwitchTo(oldcxt);
2224 :
2225 5774682 : if (dtp != ntp)
2226 4090 : heap_freetuple(dtp);
2227 :
2228 : /* extract keys - they'll point into the tuple if not by-value */
2229 16560796 : for (i = 0; i < cache->cc_nkeys; i++)
2230 : {
2231 : Datum atp;
2232 : bool isnull;
2233 :
2234 10786114 : atp = heap_getattr(&ct->tuple,
2235 : cache->cc_keyno[i],
2236 : cache->cc_tupdesc,
2237 : &isnull);
2238 : Assert(!isnull);
2239 10786114 : ct->keys[i] = atp;
2240 : }
2241 : }
2242 : else
2243 : {
2244 : /* Set up keys for a negative cache entry */
2245 1711604 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2246 1711604 : ct = (CatCTup *) palloc(sizeof(CatCTup));
2247 :
2248 : /*
2249 : * Store keys - they'll point into separately allocated memory if not
2250 : * by-value.
2251 : */
2252 1711604 : CatCacheCopyKeys(cache->cc_tupdesc, cache->cc_nkeys, cache->cc_keyno,
2253 1711604 : arguments, ct->keys);
2254 1711604 : MemoryContextSwitchTo(oldcxt);
2255 : }
2256 :
2257 : /*
2258 : * Finish initializing the CatCTup header, and add it to the cache's
2259 : * linked list and counts.
2260 : */
2261 7486286 : ct->ct_magic = CT_MAGIC;
2262 7486286 : ct->my_cache = cache;
2263 7486286 : ct->c_list = NULL;
2264 7486286 : ct->refcount = 0; /* for the moment */
2265 7486286 : ct->dead = false;
2266 7486286 : ct->negative = (ntp == NULL);
2267 7486286 : ct->hash_value = hashValue;
2268 :
2269 7486286 : dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);
2270 :
2271 7486286 : cache->cc_ntup++;
2272 7486286 : CacheHdr->ch_ntup++;
2273 :
2274 : /*
2275 : * If the hash table has become too full, enlarge the buckets array. Quite
2276 : * arbitrarily, we enlarge when fill factor > 2.
2277 : */
2278 7486286 : if (cache->cc_ntup > cache->cc_nbuckets * 2)
2279 6436 : RehashCatCache(cache);
2280 :
2281 7486286 : return ct;
2282 : }
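
A sketch of the single-allocation layout built above (same names as in the code): the palloc requests sizeof(CatCTup) + MAXIMUM_ALIGNOF + t_len bytes, so the MAXALIGN'd tuple pointer is guaranteed to stay inside the allocation.

    /*
     *  +-----------+------ pad ------+---------------------+
     *  |  CatCTup  | < MAXIMUM_ALIGNOF |  copied tuple data |
     *  +-----------+-----------------+---------------------+
     *  ^ ct                          ^ ct->tuple.t_data
     *                                  = MAXALIGN((char *) ct + sizeof(CatCTup))
     */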
2283 :
2284 : /*
2285 : * Helper routine that frees keys stored in the keys array.
2286 : */
2287 : static void
2288 595826 : CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, const int *attnos, const Datum *keys)
2289 : {
2290 : int i;
2291 :
2292 1836570 : for (i = 0; i < nkeys; i++)
2293 : {
2294 1240744 : int attnum = attnos[i];
2295 :
2296 : /* system attributes are not supported in caches */
2297 : Assert(attnum > 0);
2298 :
2299 1240744 : if (!TupleDescCompactAttr(tupdesc, attnum - 1)->attbyval)
2300 525876 : pfree(DatumGetPointer(keys[i]));
2301 : }
2302 595826 : }
2303 :
2304 : /*
2305 : * Helper routine that copies the keys in the srckeys array into the dstkeys
2306 : * one, guaranteeing that the datums are fully allocated in the current memory
2307 : * context.
2308 : */
2309 : static void
2310 2030716 : CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, const int *attnos,
2311 : const Datum *srckeys, Datum *dstkeys)
2312 : {
2313 : int i;
2314 :
2315 : /*
2316 : * XXX: memory and lookup performance could possibly be improved by
2317 : * storing all keys in one allocation.
2318 : */
2319 :
2320 6350100 : for (i = 0; i < nkeys; i++)
2321 : {
2322 4319384 : int attnum = attnos[i];
2323 4319384 : Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
2324 4319384 : Datum src = srckeys[i];
2325 : NameData srcname;
2326 :
2327 : /*
2328 : * Must be careful in case the caller passed a C string where a NAME
2329 : * is wanted: convert the given argument to a correctly padded NAME.
2330 : * Otherwise the memcpy() done by datumCopy() could fall off the end
2331 : * of memory.
2332 : */
2333 4319384 : if (att->atttypid == NAMEOID)
2334 : {
2335 871286 : namestrcpy(&srcname, DatumGetCString(src));
2336 871286 : src = NameGetDatum(&srcname);
2337 : }
2338 :
2339 4319384 : dstkeys[i] = datumCopy(src,
2340 4319384 : att->attbyval,
2341 4319384 : att->attlen);
2342 : }
2343 2030716 : }
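
Why the NAME special case above matters, as a minimal sketch: datumCopy() of a NAMEOID key copies a fixed attlen (NAMEDATALEN) bytes, so a bare C-string argument must first be widened into a fully padded NameData (the literal key here is illustrative only):

    NameData    padded;
    Datum       src;

    namestrcpy(&padded, "some_name");   /* zero-pads out to NAMEDATALEN */
    src = NameGetDatum(&padded);        /* now safe for datumCopy to read
                                         * NAMEDATALEN bytes */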
2344 :
2345 : /*
2346 : * PrepareToInvalidateCacheTuple()
2347 : *
2348 : * This is part of a rather subtle chain of events, so pay attention:
2349 : *
2350 : * When a tuple is inserted or deleted, it cannot be flushed from the
2351 : * catcaches immediately, for reasons explained at the top of cache/inval.c.
2352 : * Instead we have to add one or more entries for the tuple to a list of pending
2353 : * tuple invalidations that will be done at the end of the command or transaction.
2354 : *
2355 : * The lists of tuples that need to be flushed are kept by inval.c; this
2356 : * routine is a helper for it. Given a tuple belonging to
2357 : * the specified relation, find all catcaches it could be in, compute the
2358 : * correct hash value for each such catcache, and call the specified
2359 : * function to record the cache id and hash value in inval.c's lists.
2360 : * SysCacheInvalidate will be called later, if appropriate,
2361 : * using the recorded information.
2362 : *
2363 : * For an insert or delete, tuple is the target tuple and newtuple is NULL.
2364 : * For an update, we are called just once, with tuple being the old tuple
2365 : * version and newtuple the new version. We should make two list entries
2366 : * if the tuple's hash value changed, but only one if it didn't.
2367 : *
2368 : * Note that it is irrelevant whether the given tuple is actually loaded
2369 : * into the catcache at the moment. Even if it's not there now, it might
2370 : * be by the end of the command, or there might be a matching negative entry
2371 : * to flush --- or other backends' caches might have such entries --- so
2372 : * we have to make list entries to flush it later.
2373 : *
2374 : * Also note that it's not an error if there are no catcaches for the
2375 : * specified relation. inval.c doesn't know exactly which rels have
2376 : * catcaches --- it will call this routine for any tuple that's in a
2377 : * system relation.
2378 : */
2379 : void
2380 3301306 : PrepareToInvalidateCacheTuple(Relation relation,
2381 : HeapTuple tuple,
2382 : HeapTuple newtuple,
2383 : void (*function) (int, uint32, Oid, void *),
2384 : void *context)
2385 : {
2386 : slist_iter iter;
2387 : Oid reloid;
2388 :
2389 : CACHE_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
2390 :
2391 : /*
2392 : * sanity checks
2393 : */
2394 : Assert(RelationIsValid(relation));
2395 : Assert(HeapTupleIsValid(tuple));
2396 : Assert(function);
2397 : Assert(CacheHdr != NULL);
2398 :
2399 3301306 : reloid = RelationGetRelid(relation);
2400 :
2401 : /* ----------------
2402 : * for each cache
2403 : * if the cache contains tuples from the specified relation
2404 : * compute the tuple's hash value(s) in this cache,
2405 : * and call the passed function to register the information.
2406 : * ----------------
2407 : */
2408 :
2409 283912316 : slist_foreach(iter, &CacheHdr->ch_caches)
2410 : {
2411 280611010 : CatCache *ccp = slist_container(CatCache, cc_next, iter.cur);
2412 : uint32 hashvalue;
2413 : Oid dbid;
2414 :
2415 280611010 : if (ccp->cc_reloid != reloid)
2416 274583852 : continue;
2417 :
2418 : /* Just in case cache hasn't finished initialization yet... */
2419 6027158 : ConditionalCatalogCacheInitializeCache(ccp);
2420 :
2421 6027158 : hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
2422 6027158 : dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
2423 :
2424 6027158 : (*function) (ccp->id, hashvalue, dbid, context);
2425 :
2426 6027158 : if (newtuple)
2427 : {
2428 : uint32 newhashvalue;
2429 :
2430 432740 : newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
2431 :
2432 432740 : if (newhashvalue != hashvalue)
2433 6224 : (*function) (ccp->id, newhashvalue, dbid, context);
2434 : }
2435 : }
2436 3301306 : }
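
The shape of the callback that PrepareToInvalidateCacheTuple() expects, as a hedged sketch; inval.c's real recorder does more, this only illustrates the (cacheid, hashvalue, dbid, context) contract:

    static void
    record_catcache_inval(int cacheid, uint32 hashvalue, Oid dbid, void *context)
    {
        /* a real recorder would append to a pending-invalidation list */
        elog(DEBUG2, "pending inval: cache %d, hash %u, db %u",
             cacheid, hashvalue, dbid);
    }

    /* usage:
     *   PrepareToInvalidateCacheTuple(rel, tuple, NULL,
     *                                 record_catcache_inval, NULL);
     */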
2437 :
2438 : /* ResourceOwner callbacks */
2439 :
2440 : static void
2441 10894 : ResOwnerReleaseCatCache(Datum res)
2442 : {
2443 10894 : ReleaseCatCacheWithOwner((HeapTuple) DatumGetPointer(res), NULL);
2444 10894 : }
2445 :
2446 : static char *
2447 0 : ResOwnerPrintCatCache(Datum res)
2448 : {
2449 0 : HeapTuple tuple = (HeapTuple) DatumGetPointer(res);
2450 0 : CatCTup *ct = (CatCTup *) (((char *) tuple) -
2451 : offsetof(CatCTup, tuple));
2452 :
2453 : /* Safety check to ensure we were handed a cache entry */
2454 : Assert(ct->ct_magic == CT_MAGIC);
2455 :
2456 0 : return psprintf("cache %s (%d), tuple %u/%u has count %d",
2457 0 : ct->my_cache->cc_relname, ct->my_cache->id,
2458 0 : ItemPointerGetBlockNumber(&(tuple->t_self)),
2459 0 : ItemPointerGetOffsetNumber(&(tuple->t_self)),
2460 : ct->refcount);
2461 : }
2462 :
2463 : static void
2464 36 : ResOwnerReleaseCatCacheList(Datum res)
2465 : {
2466 36 : ReleaseCatCacheListWithOwner((CatCList *) DatumGetPointer(res), NULL);
2467 36 : }
2468 :
2469 : static char *
2470 0 : ResOwnerPrintCatCacheList(Datum res)
2471 : {
2472 0 : CatCList *list = (CatCList *) DatumGetPointer(res);
2473 :
2474 0 : return psprintf("cache %s (%d), list %p has count %d",
2475 0 : list->my_cache->cc_relname, list->my_cache->id,
2476 : list, list->refcount);
2477 : }
|