Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * catcache.c
4 : * System catalog cache for tuples matching a key.
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/utils/cache/catcache.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : #include "postgres.h"
16 :
17 : #include "access/genam.h"
18 : #include "access/heaptoast.h"
19 : #include "access/relscan.h"
20 : #include "access/table.h"
21 : #include "access/xact.h"
22 : #include "catalog/catalog.h"
23 : #include "catalog/pg_collation.h"
24 : #include "catalog/pg_type.h"
25 : #include "common/hashfn.h"
26 : #include "common/pg_prng.h"
27 : #include "miscadmin.h"
28 : #include "port/pg_bitutils.h"
29 : #ifdef CATCACHE_STATS
30 : #include "storage/ipc.h" /* for on_proc_exit */
31 : #endif
32 : #include "storage/lmgr.h"
33 : #include "utils/builtins.h"
34 : #include "utils/catcache.h"
35 : #include "utils/datum.h"
36 : #include "utils/fmgroids.h"
37 : #include "utils/injection_point.h"
38 : #include "utils/inval.h"
39 : #include "utils/memutils.h"
40 : #include "utils/rel.h"
41 : #include "utils/resowner.h"
42 : #include "utils/syscache.h"
43 :
/*
 * If a catcache invalidation is processed while we are in the middle of
 * creating a catcache entry (or list), it might apply to the entry we're
 * creating, making it invalid before it's been inserted to the catcache.  To
 * catch such cases, we have a stack of "create-in-progress" entries.  Cache
 * invalidation marks any matching entries in the stack as dead, in addition
 * to the actual CatCTup and CatCList entries.
 */
typedef struct CatCInProgress
{
	CatCache   *cache;			/* cache that the entry belongs to */
	uint32		hash_value;		/* hash of the entry; ignored for lists */
	bool		list;			/* is it a list entry? */
	bool		dead;			/* set when the entry is invalidated */
	struct CatCInProgress *next;	/* next outer (older) in-progress entry */
} CatCInProgress;

/* Top of the create-in-progress stack; NULL when no entry is being built */
static CatCInProgress *catcache_in_progress_stack = NULL;
62 :
/* #define CACHEDEBUG */	/* turns DEBUG elogs on */

/*
 * Given a hash value and the size of the hash table, find the bucket
 * in which the hash value belongs.  Since the hash table must contain
 * a power-of-2 number of elements, this is a simple bitmask.
 */
#define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))


/*
 *		variables, macros and other stuff
 */

/* Debug-only logging wrapper; compiles to nothing unless CACHEDEBUG is set */
#ifdef CACHEDEBUG
#define CACHE_elog(...) elog(__VA_ARGS__)
#else
#define CACHE_elog(...)
#endif

/* Cache management header --- pointer is NULL until created */
static CatCacheHeader *CacheHdr = NULL;
85 :
/* Forward declarations for internal (file-local) routines */

static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
											   int nkeys,
											   Datum v1, Datum v2,
											   Datum v3, Datum v4);

static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache,
												int nkeys,
												uint32 hashValue,
												Index hashIndex,
												Datum v1, Datum v2,
												Datum v3, Datum v4);

static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
										   Datum v1, Datum v2, Datum v3, Datum v4);
static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys,
												HeapTuple tuple);
static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
											const Datum *cachekeys,
											const Datum *searchkeys);

#ifdef CATCACHE_STATS
static void CatCachePrintStats(int code, Datum arg);
#endif
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
static void RehashCatCache(CatCache *cp);
static void RehashCatCacheLists(CatCache *cp);
static void CatalogCacheInitializeCache(CatCache *cache);
static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
										Datum *arguments,
										uint32 hashValue, Index hashIndex);

static void ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner);
static void ReleaseCatCacheListWithOwner(CatCList *list, ResourceOwner resowner);
static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
							 Datum *keys);
static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
							 Datum *srckeys, Datum *dstkeys);
124 :
125 :
/*
 * internal support functions
 */

/* ResourceOwner callbacks to hold catcache references */

static void ResOwnerReleaseCatCache(Datum res);
static char *ResOwnerPrintCatCache(Datum res);
static void ResOwnerReleaseCatCacheList(Datum res);
static char *ResOwnerPrintCatCacheList(Datum res);

/* Descriptor for individual catcache tuple pins held by a ResourceOwner */
static const ResourceOwnerDesc catcache_resowner_desc =
{
	/* catcache references */
	.name = "catcache reference",
	.release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
	.release_priority = RELEASE_PRIO_CATCACHE_REFS,
	.ReleaseResource = ResOwnerReleaseCatCache,
	.DebugPrint = ResOwnerPrintCatCache
};

/* Descriptor for catcache list pins held by a ResourceOwner */
static const ResourceOwnerDesc catlistref_resowner_desc =
{
	/* catcache-list pins */
	.name = "catcache list reference",
	.release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
	.release_priority = RELEASE_PRIO_CATCACHE_LIST_REFS,
	.ReleaseResource = ResOwnerReleaseCatCacheList,
	.DebugPrint = ResOwnerPrintCatCacheList
};
156 :
/* Convenience wrappers over ResourceOwnerRemember/Forget */
static inline void
ResourceOwnerRememberCatCacheRef(ResourceOwner owner, HeapTuple tuple)
{
	ResourceOwnerRemember(owner, PointerGetDatum(tuple), &catcache_resowner_desc);
}
static inline void
ResourceOwnerForgetCatCacheRef(ResourceOwner owner, HeapTuple tuple)
{
	ResourceOwnerForget(owner, PointerGetDatum(tuple), &catcache_resowner_desc);
}
static inline void
ResourceOwnerRememberCatCacheListRef(ResourceOwner owner, CatCList *list)
{
	ResourceOwnerRemember(owner, PointerGetDatum(list), &catlistref_resowner_desc);
}
static inline void
ResourceOwnerForgetCatCacheListRef(ResourceOwner owner, CatCList *list)
{
	ResourceOwnerForget(owner, PointerGetDatum(list), &catlistref_resowner_desc);
}
178 :
179 :
180 : /*
181 : * Hash and equality functions for system types that are used as cache key
182 : * fields. In some cases, we just call the regular SQL-callable functions for
183 : * the appropriate data type, but that tends to be a little slow, and the
184 : * speed of these functions is performance-critical. Therefore, for data
185 : * types that frequently occur as catcache keys, we hard-code the logic here.
186 : * Avoiding the overhead of DirectFunctionCallN(...) is a substantial win, and
187 : * in certain cases (like int4) we can adopt a faster hash algorithm as well.
188 : */
189 :
190 : static bool
191 5688656 : chareqfast(Datum a, Datum b)
192 : {
193 5688656 : return DatumGetChar(a) == DatumGetChar(b);
194 : }
195 :
196 : static uint32
197 6536084 : charhashfast(Datum datum)
198 : {
199 6536084 : return murmurhash32((int32) DatumGetChar(datum));
200 : }
201 :
202 : static bool
203 4008694 : nameeqfast(Datum a, Datum b)
204 : {
205 4008694 : char *ca = NameStr(*DatumGetName(a));
206 4008694 : char *cb = NameStr(*DatumGetName(b));
207 :
208 4008694 : return strncmp(ca, cb, NAMEDATALEN) == 0;
209 : }
210 :
211 : static uint32
212 9036684 : namehashfast(Datum datum)
213 : {
214 9036684 : char *key = NameStr(*DatumGetName(datum));
215 :
216 9036684 : return hash_any((unsigned char *) key, strlen(key));
217 : }
218 :
219 : static bool
220 8973992 : int2eqfast(Datum a, Datum b)
221 : {
222 8973992 : return DatumGetInt16(a) == DatumGetInt16(b);
223 : }
224 :
225 : static uint32
226 12381450 : int2hashfast(Datum datum)
227 : {
228 12381450 : return murmurhash32((int32) DatumGetInt16(datum));
229 : }
230 :
231 : static bool
232 102370966 : int4eqfast(Datum a, Datum b)
233 : {
234 102370966 : return DatumGetInt32(a) == DatumGetInt32(b);
235 : }
236 :
237 : static uint32
238 119809516 : int4hashfast(Datum datum)
239 : {
240 119809516 : return murmurhash32((int32) DatumGetInt32(datum));
241 : }
242 :
243 : static bool
244 166 : texteqfast(Datum a, Datum b)
245 : {
246 : /*
247 : * The use of DEFAULT_COLLATION_OID is fairly arbitrary here. We just
248 : * want to take the fast "deterministic" path in texteq().
249 : */
250 166 : return DatumGetBool(DirectFunctionCall2Coll(texteq, DEFAULT_COLLATION_OID, a, b));
251 : }
252 :
253 : static uint32
254 3704 : texthashfast(Datum datum)
255 : {
256 : /* analogously here as in texteqfast() */
257 3704 : return DatumGetInt32(DirectFunctionCall1Coll(hashtext, DEFAULT_COLLATION_OID, datum));
258 : }
259 :
260 : static bool
261 3060 : oidvectoreqfast(Datum a, Datum b)
262 : {
263 3060 : return DatumGetBool(DirectFunctionCall2(oidvectoreq, a, b));
264 : }
265 :
266 : static uint32
267 398764 : oidvectorhashfast(Datum datum)
268 : {
269 398764 : return DatumGetInt32(DirectFunctionCall1(hashoidvector, datum));
270 : }
271 :
272 : /* Lookup support functions for a type. */
273 : static void
274 1286076 : GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
275 : {
276 1286076 : switch (keytype)
277 : {
278 18254 : case BOOLOID:
279 18254 : *hashfunc = charhashfast;
280 18254 : *fasteqfunc = chareqfast;
281 18254 : *eqfunc = F_BOOLEQ;
282 18254 : break;
283 23006 : case CHAROID:
284 23006 : *hashfunc = charhashfast;
285 23006 : *fasteqfunc = chareqfast;
286 23006 : *eqfunc = F_CHAREQ;
287 23006 : break;
288 236394 : case NAMEOID:
289 236394 : *hashfunc = namehashfast;
290 236394 : *fasteqfunc = nameeqfast;
291 236394 : *eqfunc = F_NAMEEQ;
292 236394 : break;
293 77780 : case INT2OID:
294 77780 : *hashfunc = int2hashfast;
295 77780 : *fasteqfunc = int2eqfast;
296 77780 : *eqfunc = F_INT2EQ;
297 77780 : break;
298 17888 : case INT4OID:
299 17888 : *hashfunc = int4hashfast;
300 17888 : *fasteqfunc = int4eqfast;
301 17888 : *eqfunc = F_INT4EQ;
302 17888 : break;
303 7966 : case TEXTOID:
304 7966 : *hashfunc = texthashfast;
305 7966 : *fasteqfunc = texteqfast;
306 7966 : *eqfunc = F_TEXTEQ;
307 7966 : break;
308 886122 : case OIDOID:
309 : case REGPROCOID:
310 : case REGPROCEDUREOID:
311 : case REGOPEROID:
312 : case REGOPERATOROID:
313 : case REGCLASSOID:
314 : case REGTYPEOID:
315 : case REGCOLLATIONOID:
316 : case REGCONFIGOID:
317 : case REGDICTIONARYOID:
318 : case REGROLEOID:
319 : case REGNAMESPACEOID:
320 886122 : *hashfunc = int4hashfast;
321 886122 : *fasteqfunc = int4eqfast;
322 886122 : *eqfunc = F_OIDEQ;
323 886122 : break;
324 18666 : case OIDVECTOROID:
325 18666 : *hashfunc = oidvectorhashfast;
326 18666 : *fasteqfunc = oidvectoreqfast;
327 18666 : *eqfunc = F_OIDVECTOREQ;
328 18666 : break;
329 0 : default:
330 0 : elog(FATAL, "type %u not supported as catcache key", keytype);
331 : *hashfunc = NULL; /* keep compiler quiet */
332 :
333 : *eqfunc = InvalidOid;
334 : break;
335 : }
336 1286076 : }
337 :
/*
 * CatalogCacheComputeHashValue
 *
 * Compute the hash value associated with a given set of lookup keys
 */
static uint32
CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
							 Datum v1, Datum v2, Datum v3, Datum v4)
{
	uint32		hashValue = 0;
	uint32		oneHash;
	CCHashFN   *cc_hashfunc = cache->cc_hashfunc;

	CACHE_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
			   cache->cc_relname, nkeys, cache);

	/*
	 * Hash each key with its type-specific function, rotating each result
	 * by a different amount before XORing, so that equal values in
	 * different key positions yield different combined hashes.  The cases
	 * intentionally fall through from nkeys down to 1.
	 */
	switch (nkeys)
	{
		case 4:
			oneHash = (cc_hashfunc[3]) (v4);
			hashValue ^= pg_rotate_left32(oneHash, 24);
			/* FALLTHROUGH */
		case 3:
			oneHash = (cc_hashfunc[2]) (v3);
			hashValue ^= pg_rotate_left32(oneHash, 16);
			/* FALLTHROUGH */
		case 2:
			oneHash = (cc_hashfunc[1]) (v2);
			hashValue ^= pg_rotate_left32(oneHash, 8);
			/* FALLTHROUGH */
		case 1:
			oneHash = (cc_hashfunc[0]) (v1);
			hashValue ^= oneHash;
			break;
		default:
			elog(FATAL, "wrong number of hash keys: %d", nkeys);
			break;
	}

	return hashValue;
}
379 :
/*
 * CatalogCacheComputeTupleHashValue
 *
 * Compute the hash value associated with a given tuple to be cached
 */
static uint32
CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple)
{
	Datum		v1 = 0,
				v2 = 0,
				v3 = 0,
				v4 = 0;
	bool		isNull = false;
	int		   *cc_keyno = cache->cc_keyno;
	TupleDesc	cc_tupdesc = cache->cc_tupdesc;

	/* Now extract key fields from tuple, insert into scankey */
	/* Cases intentionally fall through; key columns must never be NULL. */
	switch (nkeys)
	{
		case 4:
			v4 = fastgetattr(tuple,
							 cc_keyno[3],
							 cc_tupdesc,
							 &isNull);
			Assert(!isNull);
			/* FALLTHROUGH */
		case 3:
			v3 = fastgetattr(tuple,
							 cc_keyno[2],
							 cc_tupdesc,
							 &isNull);
			Assert(!isNull);
			/* FALLTHROUGH */
		case 2:
			v2 = fastgetattr(tuple,
							 cc_keyno[1],
							 cc_tupdesc,
							 &isNull);
			Assert(!isNull);
			/* FALLTHROUGH */
		case 1:
			v1 = fastgetattr(tuple,
							 cc_keyno[0],
							 cc_tupdesc,
							 &isNull);
			Assert(!isNull);
			break;
		default:
			elog(FATAL, "wrong number of hash keys: %d", nkeys);
			break;
	}

	return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
}
434 :
435 : /*
436 : * CatalogCacheCompareTuple
437 : *
438 : * Compare a tuple to the passed arguments.
439 : */
440 : static inline bool
441 91035444 : CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
442 : const Datum *cachekeys,
443 : const Datum *searchkeys)
444 : {
445 91035444 : const CCFastEqualFN *cc_fastequal = cache->cc_fastequal;
446 : int i;
447 :
448 212080978 : for (i = 0; i < nkeys; i++)
449 : {
450 121045534 : if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i]))
451 0 : return false;
452 : }
453 91035444 : return true;
454 : }
455 :
456 :
#ifdef CATCACHE_STATS

/*
 * CatCachePrintStats
 *
 * on_proc_exit callback: dump per-cache and total search/hit/load/inval
 * statistics at DEBUG2.  Only compiled when CATCACHE_STATS is defined.
 */
static void
CatCachePrintStats(int code, Datum arg)
{
	slist_iter	iter;
	long		cc_searches = 0;
	long		cc_hits = 0;
	long		cc_neg_hits = 0;
	long		cc_newloads = 0;
	long		cc_invals = 0;
	long		cc_nlists = 0;
	long		cc_lsearches = 0;
	long		cc_lhits = 0;

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		if (cache->cc_ntup == 0 && cache->cc_searches == 0)
			continue;			/* don't print unused caches */
		elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %d lists, %ld lsrch, %ld lhits",
			 cache->cc_relname,
			 cache->cc_indexoid,
			 cache->cc_ntup,
			 cache->cc_searches,
			 cache->cc_hits,
			 cache->cc_neg_hits,
			 cache->cc_hits + cache->cc_neg_hits,
			 cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
			 cache->cc_invals,
			 cache->cc_nlist,
			 cache->cc_lsearches,
			 cache->cc_lhits);
		cc_searches += cache->cc_searches;
		cc_hits += cache->cc_hits;
		cc_neg_hits += cache->cc_neg_hits;
		cc_newloads += cache->cc_newloads;
		cc_invals += cache->cc_invals;
		cc_nlists += cache->cc_nlist;
		cc_lsearches += cache->cc_lsearches;
		cc_lhits += cache->cc_lhits;
	}
	elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lists, %ld lsrch, %ld lhits",
		 CacheHdr->ch_ntup,
		 cc_searches,
		 cc_hits,
		 cc_neg_hits,
		 cc_hits + cc_neg_hits,
		 cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits,
		 cc_invals,
		 cc_nlists,
		 cc_lsearches,
		 cc_lhits);
}
#endif							/* CATCACHE_STATS */
517 :
518 :
/*
 * CatCacheRemoveCTup
 *
 * Unlink and delete the given cache entry
 *
 * NB: if it is a member of a CatCList, the CatCList is deleted too.
 * Both the cache entry and the list had better have zero refcount.
 */
static void
CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
{
	Assert(ct->refcount == 0);
	Assert(ct->my_cache == cache);

	if (ct->c_list)
	{
		/*
		 * The cleanest way to handle this is to call CatCacheRemoveCList,
		 * which will recurse back to me, and the recursive call will do the
		 * work.  Set the "dead" flag to make sure it does recurse.
		 */
		ct->dead = true;
		CatCacheRemoveCList(cache, ct->c_list);
		return;					/* nothing left to do */
	}

	/* delink from linked list */
	dlist_delete(&ct->cache_elem);

	/*
	 * Free keys when we're dealing with a negative entry, normal entries just
	 * point into tuple, allocated together with the CatCTup.
	 */
	if (ct->negative)
		CatCacheFreeKeys(cache->cc_tupdesc, cache->cc_nkeys,
						 cache->cc_keyno, ct->keys);

	pfree(ct);

	/* keep both per-cache and global tuple counters in sync */
	--cache->cc_ntup;
	--CacheHdr->ch_ntup;
}
561 :
/*
 * CatCacheRemoveCList
 *
 * Unlink and delete the given cache list entry
 *
 * NB: any dead member entries that become unreferenced are deleted too.
 */
static void
CatCacheRemoveCList(CatCache *cache, CatCList *cl)
{
	int			i;

	Assert(cl->refcount == 0);
	Assert(cl->my_cache == cache);

	/* delink from member tuples */
	for (i = cl->n_members; --i >= 0;)
	{
		CatCTup    *ct = cl->members[i];

		Assert(ct->c_list == cl);
		ct->c_list = NULL;
		/* if the member is dead and now has no references, remove it */
		if (
#ifndef CATCACHE_FORCE_RELEASE
			ct->dead &&
#endif
			ct->refcount == 0)
			CatCacheRemoveCTup(cache, ct);
	}

	/* delink from linked list */
	dlist_delete(&cl->cache_elem);

	/* free associated column data */
	CatCacheFreeKeys(cache->cc_tupdesc, cl->nkeys,
					 cache->cc_keyno, cl->keys);

	pfree(cl);

	--cache->cc_nlist;
}
604 :
605 :
/*
 * CatCacheInvalidate
 *
 * Invalidate entries in the specified cache, given a hash value.
 *
 * We delete cache entries that match the hash value, whether positive
 * or negative.  We don't care whether the invalidation is the result
 * of a tuple insertion or a deletion.
 *
 * We used to try to match positive cache entries by TID, but that is
 * unsafe after a VACUUM FULL on a system catalog: an inval event could
 * be queued before VACUUM FULL, and then processed afterwards, when the
 * target tuple that has to be invalidated has a different TID than it
 * did when the event was created.  So now we just compare hash values and
 * accept the small risk of unnecessary invalidations due to false matches.
 *
 * This routine is only quasi-public: it should only be used by inval.c.
 */
void
CatCacheInvalidate(CatCache *cache, uint32 hashValue)
{
	Index		hashIndex;
	dlist_mutable_iter iter;

	CACHE_elog(DEBUG2, "CatCacheInvalidate: called");

	/*
	 * We don't bother to check whether the cache has finished initialization
	 * yet; if not, there will be no entries in it so no problem.
	 */

	/*
	 * Invalidate *all* CatCLists in this cache; it's too hard to tell which
	 * searches might still be correct, so just zap 'em all.
	 */
	for (int i = 0; i < cache->cc_nlbuckets; i++)
	{
		dlist_head *bucket = &cache->cc_lbucket[i];

		dlist_foreach_modify(iter, bucket)
		{
			CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

			/* pinned lists can only be marked dead, not freed yet */
			if (cl->refcount > 0)
				cl->dead = true;
			else
				CatCacheRemoveCList(cache, cl);
		}
	}

	/*
	 * inspect the proper hash bucket for tuple matches
	 */
	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
	dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
	{
		CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

		if (hashValue == ct->hash_value)
		{
			/* entries still pinned (directly or via a list) stay as "dead" */
			if (ct->refcount > 0 ||
				(ct->c_list && ct->c_list->refcount > 0))
			{
				ct->dead = true;
				/* list, if any, was marked dead above */
				Assert(ct->c_list == NULL || ct->c_list->dead);
			}
			else
				CatCacheRemoveCTup(cache, ct);
			CACHE_elog(DEBUG2, "CatCacheInvalidate: invalidated");
#ifdef CATCACHE_STATS
			cache->cc_invals++;
#endif
			/* could be multiple matches, so keep looking! */
		}
	}

	/* Also invalidate any entries that are being built */
	for (CatCInProgress *e = catcache_in_progress_stack; e != NULL; e = e->next)
	{
		if (e->cache == cache)
		{
			/* in-progress lists are always zapped; tuples only on hash match */
			if (e->list || e->hash_value == hashValue)
				e->dead = true;
		}
	}
}
693 :
694 : /* ----------------------------------------------------------------
695 : * public functions
696 : * ----------------------------------------------------------------
697 : */
698 :
699 :
700 : /*
701 : * Standard routine for creating cache context if it doesn't exist yet
702 : *
703 : * There are a lot of places (probably far more than necessary) that check
704 : * whether CacheMemoryContext exists yet and want to create it if not.
705 : * We centralize knowledge of exactly how to create it here.
706 : */
707 : void
708 35320 : CreateCacheMemoryContext(void)
709 : {
710 : /*
711 : * Purely for paranoia, check that context doesn't exist; caller probably
712 : * did so already.
713 : */
714 35320 : if (!CacheMemoryContext)
715 35320 : CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
716 : "CacheMemoryContext",
717 : ALLOCSET_DEFAULT_SIZES);
718 35320 : }
719 :
720 :
/*
 * ResetCatalogCache
 *
 * Reset one catalog cache to empty.
 *
 * This is not very efficient if the target cache is nearly empty.
 * However, it shouldn't need to be efficient; we don't invoke it often.
 *
 * If 'debug_discard' is true, we are being called as part of
 * debug_discard_caches.  In that case, the cache is not reset for
 * correctness, but just to get more testing of cache invalidation.  We skip
 * resetting in-progress build entries in that case, or we'd never make any
 * progress.
 */
static void
ResetCatalogCache(CatCache *cache, bool debug_discard)
{
	dlist_mutable_iter iter;
	int			i;

	/* Remove each list in this cache, or at least mark it dead */
	for (i = 0; i < cache->cc_nlbuckets; i++)
	{
		dlist_head *bucket = &cache->cc_lbucket[i];

		dlist_foreach_modify(iter, bucket)
		{
			CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

			if (cl->refcount > 0)
				cl->dead = true;
			else
				CatCacheRemoveCList(cache, cl);
		}
	}

	/* Remove each tuple in this cache, or at least mark it dead */
	for (i = 0; i < cache->cc_nbuckets; i++)
	{
		dlist_head *bucket = &cache->cc_bucket[i];

		dlist_foreach_modify(iter, bucket)
		{
			CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

			if (ct->refcount > 0 ||
				(ct->c_list && ct->c_list->refcount > 0))
			{
				ct->dead = true;
				/* list, if any, was marked dead above */
				Assert(ct->c_list == NULL || ct->c_list->dead);
			}
			else
				CatCacheRemoveCTup(cache, ct);
#ifdef CATCACHE_STATS
			cache->cc_invals++;
#endif
		}
	}

	/* Also invalidate any entries that are being built */
	if (!debug_discard)
	{
		for (CatCInProgress *e = catcache_in_progress_stack; e != NULL; e = e->next)
		{
			if (e->cache == cache)
				e->dead = true;
		}
	}
}
791 :
/*
 * ResetCatalogCaches
 *
 * Reset all caches when a shared cache inval event forces it
 */
void
ResetCatalogCaches(void)
{
	/* plain reset; not part of debug_discard_caches testing */
	ResetCatalogCachesExt(false);
}
802 :
/*
 * Extended form of ResetCatalogCaches: resets every catcache, passing
 * debug_discard through to ResetCatalogCache (see its header comment).
 */
void
ResetCatalogCachesExt(bool debug_discard)
{
	slist_iter	iter;

	CACHE_elog(DEBUG2, "ResetCatalogCaches called");

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		ResetCatalogCache(cache, debug_discard);
	}

	CACHE_elog(DEBUG2, "end of ResetCatalogCaches call");
}
819 :
/*
 * CatalogCacheFlushCatalog
 *
 * Flush all catcache entries that came from the specified system catalog.
 * This is needed after VACUUM FULL/CLUSTER on the catalog, since the
 * tuples very likely now have different TIDs than before.  (At one point
 * we also tried to force re-execution of CatalogCacheInitializeCache for
 * the cache(s) on that catalog.  This is a bad idea since it leads to all
 * kinds of trouble if a cache flush occurs while loading cache entries.
 * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
 * rather than relying on the relcache to keep a tupdesc for us.  Of course
 * this assumes the tupdesc of a cachable system table will not change...)
 */
void
CatalogCacheFlushCatalog(Oid catId)
{
	slist_iter	iter;

	CACHE_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		/* Does this cache store tuples of the target catalog? */
		if (cache->cc_reloid == catId)
		{
			/* Yes, so flush all its contents */
			ResetCatalogCache(cache, false);

			/* Tell inval.c to call syscache callbacks for this cache */
			CallSyscacheCallbacks(cache->id, 0);
		}
	}

	CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
}
857 :
/*
 * InitCatCache
 *
 *	This allocates and initializes a cache for a system catalog relation.
 *	Actually, the cache is only partially initialized to avoid opening the
 *	relation.  The relation will be opened and the rest of the cache
 *	structure initialized on the first access.
 */
#ifdef CACHEDEBUG
#define InitCatCache_DEBUG2 \
do { \
	elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
		 cp->cc_reloid, cp->cc_indexoid, cp->id, \
		 cp->cc_nkeys, cp->cc_nbuckets); \
} while(0)
#else
#define InitCatCache_DEBUG2
#endif

CatCache *
InitCatCache(int id,
			 Oid reloid,
			 Oid indexoid,
			 int nkeys,
			 const int *key,
			 int nbuckets)
{
	CatCache   *cp;
	MemoryContext oldcxt;
	int			i;

	/*
	 * nbuckets is the initial number of hash buckets to use in this catcache.
	 * It will be enlarged later if it becomes too full.
	 *
	 * nbuckets must be a power of two.  We check this via Assert rather than
	 * a full runtime check because the values will be coming from constant
	 * tables.
	 *
	 * If you're confused by the power-of-two check, see comments in
	 * bitmapset.c for an explanation.
	 */
	Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);

	/*
	 * first switch to the cache context so our allocations do not vanish at
	 * the end of a transaction
	 */
	if (!CacheMemoryContext)
		CreateCacheMemoryContext();

	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/*
	 * if first time through, initialize the cache group header
	 */
	if (CacheHdr == NULL)
	{
		CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
		slist_init(&CacheHdr->ch_caches);
		CacheHdr->ch_ntup = 0;
#ifdef CATCACHE_STATS
		/* set up to dump stats at backend exit */
		on_proc_exit(CatCachePrintStats, 0);
#endif
	}

	/*
	 * Allocate a new cache structure, aligning to a cacheline boundary
	 *
	 * Note: we rely on zeroing to initialize all the dlist headers correctly
	 */
	cp = (CatCache *) palloc_aligned(sizeof(CatCache), PG_CACHE_LINE_SIZE,
									 MCXT_ALLOC_ZERO);
	cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));

	/*
	 * Many catcaches never receive any list searches.  Therefore, we don't
	 * allocate the cc_lbuckets till we get a list search.
	 */
	cp->cc_lbucket = NULL;

	/*
	 * initialize the cache's relation information for the relation
	 * corresponding to this cache, and initialize some of the new cache's
	 * other internal fields.  But don't open the relation yet.
	 */
	cp->id = id;
	cp->cc_relname = "(not known yet)";
	cp->cc_reloid = reloid;
	cp->cc_indexoid = indexoid;
	cp->cc_relisshared = false; /* temporary */
	cp->cc_tupdesc = (TupleDesc) NULL;
	cp->cc_ntup = 0;
	cp->cc_nlist = 0;
	cp->cc_nbuckets = nbuckets;
	cp->cc_nlbuckets = 0;
	cp->cc_nkeys = nkeys;
	for (i = 0; i < nkeys; ++i)
	{
		Assert(AttributeNumberIsValid(key[i]));
		cp->cc_keyno[i] = key[i];
	}

	/*
	 * new cache is initialized as far as we can go for now. print some
	 * debugging information, if appropriate.
	 */
	InitCatCache_DEBUG2;

	/*
	 * add completed cache to top of group header's list
	 */
	slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);

	/*
	 * back to the old context before we return...
	 */
	MemoryContextSwitchTo(oldcxt);

	return cp;
}
980 :
981 : /*
982 : * Enlarge a catcache, doubling the number of buckets.
983 : */
984 : static void
985 6516 : RehashCatCache(CatCache *cp)
986 : {
987 : dlist_head *newbucket;
988 : int newnbuckets;
989 : int i;
990 :
991 6516 : elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
992 : cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);
993 :
994 : /* Allocate a new, larger, hash table. */
995 6516 : newnbuckets = cp->cc_nbuckets * 2;
996 6516 : newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
997 :
998 : /* Move all entries from old hash table to new. */
999 570452 : for (i = 0; i < cp->cc_nbuckets; i++)
1000 : {
1001 : dlist_mutable_iter iter;
1002 :
1003 1698324 : dlist_foreach_modify(iter, &cp->cc_bucket[i])
1004 : {
1005 1134388 : CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
1006 1134388 : int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
1007 :
1008 1134388 : dlist_delete(iter.cur);
1009 1134388 : dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
1010 : }
1011 : }
1012 :
1013 : /* Switch to the new array. */
1014 6516 : pfree(cp->cc_bucket);
1015 6516 : cp->cc_nbuckets = newnbuckets;
1016 6516 : cp->cc_bucket = newbucket;
1017 6516 : }
1018 :
1019 : /*
1020 : * Enlarge a catcache's list storage, doubling the number of buckets.
1021 : */
1022 : static void
1023 1220 : RehashCatCacheLists(CatCache *cp)
1024 : {
1025 : dlist_head *newbucket;
1026 : int newnbuckets;
1027 : int i;
1028 :
1029 1220 : elog(DEBUG1, "rehashing catalog cache id %d for %s; %d lists, %d buckets",
1030 : cp->id, cp->cc_relname, cp->cc_nlist, cp->cc_nlbuckets);
1031 :
1032 : /* Allocate a new, larger, hash table. */
1033 1220 : newnbuckets = cp->cc_nlbuckets * 2;
1034 1220 : newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
1035 :
1036 : /* Move all entries from old hash table to new. */
1037 45636 : for (i = 0; i < cp->cc_nlbuckets; i++)
1038 : {
1039 : dlist_mutable_iter iter;
1040 :
1041 134468 : dlist_foreach_modify(iter, &cp->cc_lbucket[i])
1042 : {
1043 90052 : CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
1044 90052 : int hashIndex = HASH_INDEX(cl->hash_value, newnbuckets);
1045 :
1046 90052 : dlist_delete(iter.cur);
1047 90052 : dlist_push_head(&newbucket[hashIndex], &cl->cache_elem);
1048 : }
1049 : }
1050 :
1051 : /* Switch to the new array. */
1052 1220 : pfree(cp->cc_lbucket);
1053 1220 : cp->cc_nlbuckets = newnbuckets;
1054 1220 : cp->cc_lbucket = newbucket;
1055 1220 : }
1056 :
1057 : /*
1058 : * ConditionalCatalogCacheInitializeCache
1059 : *
1060 : * Call CatalogCacheInitializeCache() if not yet done.
1061 : */
1062 : pg_attribute_always_inline
1063 : static void
1064 104779338 : ConditionalCatalogCacheInitializeCache(CatCache *cache)
1065 : {
1066 : #ifdef USE_ASSERT_CHECKING
1067 : /*
1068 : * TypeCacheRelCallback() runs outside transactions and relies on TYPEOID
1069 : * for hashing. This isn't ideal. Since lookup_type_cache() both
1070 : * registers the callback and searches TYPEOID, reaching trouble likely
1071 : * requires OOM at an unlucky moment.
1072 : *
1073 : * InvalidateAttoptCacheCallback() runs outside transactions and likewise
1074 : * relies on ATTNUM. InitPostgres() initializes ATTNUM, so it's reliable.
1075 : */
1076 : if (!(cache->id == TYPEOID || cache->id == ATTNUM) ||
1077 : IsTransactionState())
1078 : AssertCouldGetRelation();
1079 : else
1080 : Assert(cache->cc_tupdesc != NULL);
1081 : #endif
1082 :
1083 104779338 : if (unlikely(cache->cc_tupdesc == NULL))
1084 802298 : CatalogCacheInitializeCache(cache);
1085 104779328 : }
1086 :
/*
 * CatalogCacheInitializeCache
 *
 * This function does final initialization of a catcache: obtain the tuple
 * descriptor and set up the hash and equality function links.
 *
 * This is done lazily (see ConditionalCatalogCacheInitializeCache) because
 * it requires opening the catalog relation, which may not be possible at
 * the time the cache struct itself is created.
 */
#ifdef CACHEDEBUG
#define CatalogCacheInitializeCache_DEBUG1 \
	elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
		 cache->cc_reloid)

#define CatalogCacheInitializeCache_DEBUG2 \
	do { \
		if (cache->cc_keyno[i] > 0) { \
			elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
				 i+1, cache->cc_nkeys, cache->cc_keyno[i], \
				 TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \
		} else { \
			elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
				 i+1, cache->cc_nkeys, cache->cc_keyno[i]); \
		} \
	} while(0)
#else
#define CatalogCacheInitializeCache_DEBUG1
#define CatalogCacheInitializeCache_DEBUG2
#endif

static void
CatalogCacheInitializeCache(CatCache *cache)
{
	Relation	relation;
	MemoryContext oldcxt;
	TupleDesc	tupdesc;
	int			i;

	CatalogCacheInitializeCache_DEBUG1;

	relation = table_open(cache->cc_reloid, AccessShareLock);

	/*
	 * switch to the cache context so our allocations do not vanish at the end
	 * of a transaction
	 */
	Assert(CacheMemoryContext != NULL);

	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/*
	 * copy the relcache's tuple descriptor to permanent cache storage
	 */
	tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));

	/*
	 * save the relation's name and relisshared flag, too (cc_relname is used
	 * only for debugging purposes)
	 */
	cache->cc_relname = pstrdup(RelationGetRelationName(relation));
	cache->cc_relisshared = RelationGetForm(relation)->relisshared;

	/*
	 * return to the caller's memory context and close the rel
	 */
	MemoryContextSwitchTo(oldcxt);

	table_close(relation, AccessShareLock);

	CACHE_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
			   cache->cc_relname, cache->cc_nkeys);

	/*
	 * initialize cache's key information: for each key column, look up the
	 * hash and equality support functions and fill in a scan key template.
	 */
	for (i = 0; i < cache->cc_nkeys; ++i)
	{
		Oid			keytype;
		RegProcedure eqfunc;

		CatalogCacheInitializeCache_DEBUG2;

		if (cache->cc_keyno[i] > 0)
		{
			/* ordinary (user) attribute: take its type from the tupdesc */
			Form_pg_attribute attr = TupleDescAttr(tupdesc,
												   cache->cc_keyno[i] - 1);

			keytype = attr->atttypid;
			/* cache key columns should always be NOT NULL */
			Assert(attr->attnotnull);
		}
		else
		{
			/* attno 0 means "use the tuple OID"; negative attnos rejected */
			if (cache->cc_keyno[i] < 0)
				elog(FATAL, "sys attributes are not supported in caches");
			keytype = OIDOID;
		}

		GetCCHashEqFuncs(keytype,
						 &cache->cc_hashfunc[i],
						 &eqfunc,
						 &cache->cc_fastequal[i]);

		/*
		 * Do equality-function lookup (we assume this won't need a catalog
		 * lookup for any supported type)
		 */
		fmgr_info_cxt(eqfunc,
					  &cache->cc_skey[i].sk_func,
					  CacheMemoryContext);

		/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
		cache->cc_skey[i].sk_attno = cache->cc_keyno[i];

		/* Fill in sk_strategy as well --- always standard equality */
		cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
		cache->cc_skey[i].sk_subtype = InvalidOid;
		/* If a catcache key requires a collation, it must be C collation */
		cache->cc_skey[i].sk_collation = C_COLLATION_OID;

		CACHE_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
				   cache->cc_relname, i, cache);
	}

	/*
	 * mark this cache fully initialized (set last, so that a concurrent or
	 * recursive observer of cc_tupdesc != NULL sees a complete cache)
	 */
	cache->cc_tupdesc = tupdesc;
}
1213 :
1214 : /*
1215 : * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
1216 : *
1217 : * One reason to call this routine is to ensure that the relcache has
1218 : * created entries for all the catalogs and indexes referenced by catcaches.
1219 : * Therefore, provide an option to open the index as well as fixing the
1220 : * cache itself. An exception is the indexes on pg_am, which we don't use
1221 : * (cf. IndexScanOK).
1222 : */
1223 : void
1224 321554 : InitCatCachePhase2(CatCache *cache, bool touch_index)
1225 : {
1226 321554 : ConditionalCatalogCacheInitializeCache(cache);
1227 :
1228 321550 : if (touch_index &&
1229 287734 : cache->id != AMOID &&
1230 284348 : cache->id != AMNAME)
1231 : {
1232 : Relation idesc;
1233 :
1234 : /*
1235 : * We must lock the underlying catalog before opening the index to
1236 : * avoid deadlock, since index_open could possibly result in reading
1237 : * this same catalog, and if anyone else is exclusive-locking this
1238 : * catalog and index they'll be doing it in that order.
1239 : */
1240 280962 : LockRelationOid(cache->cc_reloid, AccessShareLock);
1241 280962 : idesc = index_open(cache->cc_indexoid, AccessShareLock);
1242 :
1243 : /*
1244 : * While we've got the index open, let's check that it's unique (and
1245 : * not just deferrable-unique, thank you very much). This is just to
1246 : * catch thinkos in definitions of new catcaches, so we don't worry
1247 : * about the pg_am indexes not getting tested.
1248 : */
1249 : Assert(idesc->rd_index->indisunique &&
1250 : idesc->rd_index->indimmediate);
1251 :
1252 280962 : index_close(idesc, AccessShareLock);
1253 280962 : UnlockRelationOid(cache->cc_reloid, AccessShareLock);
1254 : }
1255 321550 : }
1256 :
1257 :
1258 : /*
1259 : * IndexScanOK
1260 : *
1261 : * This function checks for tuples that will be fetched by
1262 : * IndexSupportInitialize() during relcache initialization for
1263 : * certain system indexes that support critical syscaches.
1264 : * We can't use an indexscan to fetch these, else we'll get into
1265 : * infinite recursion. A plain heap scan will work, however.
1266 : * Once we have completed relcache initialization (signaled by
1267 : * criticalRelcachesBuilt), we don't have to worry anymore.
1268 : *
1269 : * Similarly, during backend startup we have to be able to use the
1270 : * pg_authid, pg_auth_members and pg_database syscaches for
1271 : * authentication even if we don't yet have relcache entries for those
1272 : * catalogs' indexes.
1273 : */
1274 : static bool
1275 6605916 : IndexScanOK(CatCache *cache)
1276 : {
1277 6605916 : switch (cache->id)
1278 : {
1279 630054 : case INDEXRELID:
1280 :
1281 : /*
1282 : * Rather than tracking exactly which indexes have to be loaded
1283 : * before we can use indexscans (which changes from time to time),
1284 : * just force all pg_index searches to be heap scans until we've
1285 : * built the critical relcaches.
1286 : */
1287 630054 : if (!criticalRelcachesBuilt)
1288 36780 : return false;
1289 593274 : break;
1290 :
1291 61096 : case AMOID:
1292 : case AMNAME:
1293 :
1294 : /*
1295 : * Always do heap scans in pg_am, because it's so small there's
1296 : * not much point in an indexscan anyway. We *must* do this when
1297 : * initially building critical relcache entries, but we might as
1298 : * well just always do it.
1299 : */
1300 61096 : return false;
1301 :
1302 114542 : case AUTHNAME:
1303 : case AUTHOID:
1304 : case AUTHMEMMEMROLE:
1305 : case DATABASEOID:
1306 :
1307 : /*
1308 : * Protect authentication lookups occurring before relcache has
1309 : * collected entries for shared indexes.
1310 : */
1311 114542 : if (!criticalSharedRelcachesBuilt)
1312 4472 : return false;
1313 110070 : break;
1314 :
1315 5800224 : default:
1316 5800224 : break;
1317 : }
1318 :
1319 : /* Normal case, allow index scan */
1320 6503568 : return true;
1321 : }
1322 :
1323 : /*
1324 : * SearchCatCache
1325 : *
1326 : * This call searches a system cache for a tuple, opening the relation
1327 : * if necessary (on the first access to a particular cache).
1328 : *
1329 : * The result is NULL if not found, or a pointer to a HeapTuple in
1330 : * the cache. The caller must not modify the tuple, and must call
1331 : * ReleaseCatCache() when done with it.
1332 : *
1333 : * The search key values should be expressed as Datums of the key columns'
1334 : * datatype(s). (Pass zeroes for any unused parameters.) As a special
1335 : * exception, the passed-in key for a NAME column can be just a C string;
1336 : * the caller need not go to the trouble of converting it to a fully
1337 : * null-padded NAME.
1338 : */
1339 : HeapTuple
1340 5639638 : SearchCatCache(CatCache *cache,
1341 : Datum v1,
1342 : Datum v2,
1343 : Datum v3,
1344 : Datum v4)
1345 : {
1346 5639638 : return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4);
1347 : }
1348 :
1349 :
/*
 * SearchCatCacheN() are SearchCatCache() versions for a specific number of
 * arguments. The compiler can inline the body and unroll loops, making them a
 * bit faster than SearchCatCache().
 *
 * Each variant passes literal zeroes for the unused key columns; the caller
 * is responsible for choosing the variant matching the cache's key count
 * (SearchCatCacheInternal asserts this).
 */

/* Single-key lookup; see SearchCatCache for the result contract. */
HeapTuple
SearchCatCache1(CatCache *cache,
				Datum v1)
{
	return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0);
}


/* Two-key lookup; see SearchCatCache for the result contract. */
HeapTuple
SearchCatCache2(CatCache *cache,
				Datum v1, Datum v2)
{
	return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0);
}


/* Three-key lookup; see SearchCatCache for the result contract. */
HeapTuple
SearchCatCache3(CatCache *cache,
				Datum v1, Datum v2, Datum v3)
{
	return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0);
}


/* Four-key lookup; see SearchCatCache for the result contract. */
HeapTuple
SearchCatCache4(CatCache *cache,
				Datum v1, Datum v2, Datum v3, Datum v4)
{
	return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
}
1386 :
/*
 * Work-horse for SearchCatCache/SearchCatCacheN.
 *
 * Looks up the entry matching (v1..v4) in the cache's hash table; on a hit,
 * returns the tuple (positive entry, refcount bumped and remembered in the
 * current resource owner) or NULL (negative entry).  On a miss, falls
 * through to SearchCatCacheMiss() to consult the catalog itself.
 */
static inline HeapTuple
SearchCatCacheInternal(CatCache *cache,
					   int nkeys,
					   Datum v1,
					   Datum v2,
					   Datum v3,
					   Datum v4)
{
	Datum		arguments[CATCACHE_MAXKEYS];
	uint32		hashValue;
	Index		hashIndex;
	dlist_iter	iter;
	dlist_head *bucket;
	CatCTup    *ct;

	/* the caller-chosen N must match the cache's key count */
	Assert(cache->cc_nkeys == nkeys);

	/*
	 * one-time startup overhead for each cache
	 */
	ConditionalCatalogCacheInitializeCache(cache);

#ifdef CATCACHE_STATS
	cache->cc_searches++;
#endif

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * find the hash bucket in which to look for the tuple
	 */
	hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

	/*
	 * scan the hash bucket until we find a match or exhaust our tuples
	 *
	 * Note: it's okay to use dlist_foreach here, even though we modify the
	 * dlist within the loop, because we don't continue the loop afterwards.
	 */
	bucket = &cache->cc_bucket[hashIndex];
	dlist_foreach(iter, bucket)
	{
		ct = dlist_container(CatCTup, cache_elem, iter.cur);

		if (ct->dead)
			continue;			/* ignore dead entries */

		if (ct->hash_value != hashValue)
			continue;			/* quickly skip entry if wrong hash val */

		/* full key comparison, since hash values can collide */
		if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
			continue;

		/*
		 * We found a match in the cache.  Move it to the front of the list
		 * for its hashbucket, in order to speed subsequent searches.  (The
		 * most frequently accessed elements in any hashbucket will tend to be
		 * near the front of the hashbucket's list.)
		 */
		dlist_move_head(bucket, &ct->cache_elem);

		/*
		 * If it's a positive entry, bump its refcount and return it. If it's
		 * negative, we can report failure to the caller.
		 */
		if (!ct->negative)
		{
			/* reserve resowner space before bumping, so failure can't leak */
			ResourceOwnerEnlarge(CurrentResourceOwner);
			ct->refcount++;
			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);

			CACHE_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
					   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_hits++;
#endif

			return &ct->tuple;
		}
		else
		{
			CACHE_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
					   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_neg_hits++;
#endif

			return NULL;
		}
	}

	/* not in cache: go to the catalog */
	return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4);
}
1490 :
/*
 * Search the actual catalogs, rather than the cache.
 *
 * This is kept separate from SearchCatCacheInternal() to keep the fast-path
 * as small as possible. To avoid that effort being undone by a helpful
 * compiler, try to explicitly forbid inlining.
 *
 * hashValue/hashIndex are the precomputed hash and bucket index for the
 * search keys (v1..v4); nkeys is the cache's full key count.  Returns the
 * newly cached tuple with refcount 1, or NULL after installing a negative
 * entry (or, in bootstrap mode, no entry at all).
 */
static pg_noinline HeapTuple
SearchCatCacheMiss(CatCache *cache,
				   int nkeys,
				   uint32 hashValue,
				   Index hashIndex,
				   Datum v1,
				   Datum v2,
				   Datum v3,
				   Datum v4)
{
	ScanKeyData cur_skey[CATCACHE_MAXKEYS];
	Relation	relation;
	SysScanDesc scandesc;
	HeapTuple	ntp;
	CatCTup    *ct;
	bool		stale;
	Datum		arguments[CATCACHE_MAXKEYS];

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * Tuple was not found in cache, so we have to try to retrieve it directly
	 * from the relation.  If found, we will add it to the cache; if not
	 * found, we will add a negative cache entry instead.
	 *
	 * NOTE: it is possible for recursive cache lookups to occur while reading
	 * the relation --- for example, due to shared-cache-inval messages being
	 * processed during table_open().  This is OK.  It's even possible for one
	 * of those lookups to find and enter the very same tuple we are trying to
	 * fetch here.  If that happens, we will enter a second copy of the tuple
	 * into the cache.  The first copy will never be referenced again, and
	 * will eventually age out of the cache, so there's no functional problem.
	 * This case is rare enough that it's not worth expending extra cycles to
	 * detect.
	 *
	 * Another case, which we *must* handle, is that the tuple could become
	 * outdated during CatalogCacheCreateEntry's attempt to detoast it (since
	 * AcceptInvalidationMessages can run during TOAST table access).  We do
	 * not want to return already-stale catcache entries, so we loop around
	 * and do the table scan again if that happens.
	 */
	relation = table_open(cache->cc_reloid, AccessShareLock);

	/*
	 * Ok, need to make a lookup in the relation, copy the scankey and fill
	 * out any per-call fields.
	 */
	memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
	cur_skey[0].sk_argument = v1;
	cur_skey[1].sk_argument = v2;
	cur_skey[2].sk_argument = v3;
	cur_skey[3].sk_argument = v4;

	do
	{
		/* heap scan, not index scan, while critical relcaches are unbuilt */
		scandesc = systable_beginscan(relation,
									  cache->cc_indexoid,
									  IndexScanOK(cache),
									  NULL,
									  nkeys,
									  cur_skey);

		ct = NULL;
		stale = false;

		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
		{
			ct = CatalogCacheCreateEntry(cache, ntp, NULL,
										 hashValue, hashIndex);
			/* upon failure, we must start the scan over */
			if (ct == NULL)
			{
				stale = true;
				break;
			}
			/* immediately set the refcount to 1 */
			ResourceOwnerEnlarge(CurrentResourceOwner);
			ct->refcount++;
			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
			break;				/* assume only one match */
		}

		systable_endscan(scandesc);
	} while (stale);

	table_close(relation, AccessShareLock);

	/*
	 * If tuple was not found, we need to build a negative cache entry
	 * containing a fake tuple.  The fake tuple has the correct key columns,
	 * but nulls everywhere else.
	 *
	 * In bootstrap mode, we don't build negative entries, because the cache
	 * invalidation mechanism isn't alive and can't clear them if the tuple
	 * gets created later.  (Bootstrap doesn't do UPDATEs, so it doesn't need
	 * cache inval for that.)
	 */
	if (ct == NULL)
	{
		if (IsBootstrapProcessingMode())
			return NULL;

		ct = CatalogCacheCreateEntry(cache, NULL, arguments,
									 hashValue, hashIndex);

		/* Creating a negative cache entry shouldn't fail */
		Assert(ct != NULL);

		CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
				   cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
		CACHE_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
				   cache->cc_relname, hashIndex);

		/*
		 * We are not returning the negative entry to the caller, so leave its
		 * refcount zero.
		 */

		return NULL;
	}

	CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
			   cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
	CACHE_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
			   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
	cache->cc_newloads++;
#endif

	return &ct->tuple;
}
1634 :
/*
 * ReleaseCatCache
 *
 * Decrement the reference count of a catcache entry (releasing the
 * hold grabbed by a successful SearchCatCache).
 *
 * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
 * will be freed as soon as their refcount goes to zero. In combination
 * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
 * to catch references to already-released catcache entries.
 */
void
ReleaseCatCache(HeapTuple tuple)
{
	/* also forget the reference recorded in the current resource owner */
	ReleaseCatCacheWithOwner(tuple, CurrentResourceOwner);
}
1651 :
1652 : static void
1653 87759642 : ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner)
1654 : {
1655 87759642 : CatCTup *ct = (CatCTup *) (((char *) tuple) -
1656 : offsetof(CatCTup, tuple));
1657 :
1658 : /* Safety checks to ensure we were handed a cache entry */
1659 : Assert(ct->ct_magic == CT_MAGIC);
1660 : Assert(ct->refcount > 0);
1661 :
1662 87759642 : ct->refcount--;
1663 87759642 : if (resowner)
1664 87749016 : ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);
1665 :
1666 87759642 : if (
1667 : #ifndef CATCACHE_FORCE_RELEASE
1668 87759642 : ct->dead &&
1669 : #endif
1670 1536 : ct->refcount == 0 &&
1671 1416 : (ct->c_list == NULL || ct->c_list->refcount == 0))
1672 1416 : CatCacheRemoveCTup(ct->my_cache, ct);
1673 87759642 : }
1674 :
1675 :
/*
 * GetCatCacheHashValue
 *
 * Compute the hash value for a given set of search keys.
 *
 * The reason for exposing this as part of the API is that the hash value is
 * exposed in cache invalidation operations, so there are places outside the
 * catcache code that need to be able to compute the hash values.
 *
 * Pass zeroes for any key columns beyond the cache's cc_nkeys.
 */
uint32
GetCatCacheHashValue(CatCache *cache,
					 Datum v1,
					 Datum v2,
					 Datum v3,
					 Datum v4)
{
	/*
	 * one-time startup overhead for each cache (the hash functions are set
	 * up by CatalogCacheInitializeCache)
	 */
	ConditionalCatalogCacheInitializeCache(cache);

	/*
	 * calculate the hash value
	 */
	return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4);
}
1702 :
1703 :
1704 : /*
1705 : * SearchCatCacheList
1706 : *
1707 : * Generate a list of all tuples matching a partial key (that is,
1708 : * a key specifying just the first K of the cache's N key columns).
1709 : *
1710 : * It doesn't make any sense to specify all of the cache's key columns
1711 : * here: since the key is unique, there could be at most one match, so
1712 : * you ought to use SearchCatCache() instead. Hence this function takes
1713 : * one fewer Datum argument than SearchCatCache() does.
1714 : *
1715 : * The caller must not modify the list object or the pointed-to tuples,
1716 : * and must call ReleaseCatCacheList() when done with the list.
1717 : */
1718 : CatCList *
1719 3868000 : SearchCatCacheList(CatCache *cache,
1720 : int nkeys,
1721 : Datum v1,
1722 : Datum v2,
1723 : Datum v3)
1724 : {
1725 3868000 : Datum v4 = 0; /* dummy last-column value */
1726 : Datum arguments[CATCACHE_MAXKEYS];
1727 : uint32 lHashValue;
1728 : Index lHashIndex;
1729 : dlist_iter iter;
1730 : dlist_head *lbucket;
1731 : CatCList *cl;
1732 : CatCTup *ct;
1733 : List *volatile ctlist;
1734 : ListCell *ctlist_item;
1735 : int nmembers;
1736 : bool ordered;
1737 : HeapTuple ntp;
1738 : MemoryContext oldcxt;
1739 : int i;
1740 : CatCInProgress *save_in_progress;
1741 : CatCInProgress in_progress_ent;
1742 :
1743 : /*
1744 : * one-time startup overhead for each cache
1745 : */
1746 3868000 : ConditionalCatalogCacheInitializeCache(cache);
1747 :
1748 : Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1749 :
1750 : #ifdef CATCACHE_STATS
1751 : cache->cc_lsearches++;
1752 : #endif
1753 :
1754 : /* Initialize local parameter array */
1755 3868000 : arguments[0] = v1;
1756 3868000 : arguments[1] = v2;
1757 3868000 : arguments[2] = v3;
1758 3868000 : arguments[3] = v4;
1759 :
1760 : /*
1761 : * If we haven't previously done a list search in this cache, create the
1762 : * bucket header array; otherwise, consider whether it's time to enlarge
1763 : * it.
1764 : */
1765 3868000 : if (cache->cc_lbucket == NULL)
1766 : {
1767 : /* Arbitrary initial size --- must be a power of 2 */
1768 47864 : int nbuckets = 16;
1769 :
1770 47864 : cache->cc_lbucket = (dlist_head *)
1771 47864 : MemoryContextAllocZero(CacheMemoryContext,
1772 : nbuckets * sizeof(dlist_head));
1773 : /* Don't set cc_nlbuckets if we get OOM allocating cc_lbucket */
1774 47864 : cache->cc_nlbuckets = nbuckets;
1775 : }
1776 : else
1777 : {
1778 : /*
1779 : * If the hash table has become too full, enlarge the buckets array.
1780 : * Quite arbitrarily, we enlarge when fill factor > 2.
1781 : */
1782 3820136 : if (cache->cc_nlist > cache->cc_nlbuckets * 2)
1783 1220 : RehashCatCacheLists(cache);
1784 : }
1785 :
1786 : /*
1787 : * Find the hash bucket in which to look for the CatCList.
1788 : */
1789 3868000 : lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
1790 3868000 : lHashIndex = HASH_INDEX(lHashValue, cache->cc_nlbuckets);
1791 :
1792 : /*
1793 : * scan the items until we find a match or exhaust our list
1794 : *
1795 : * Note: it's okay to use dlist_foreach here, even though we modify the
1796 : * dlist within the loop, because we don't continue the loop afterwards.
1797 : */
1798 3868000 : lbucket = &cache->cc_lbucket[lHashIndex];
1799 4256840 : dlist_foreach(iter, lbucket)
1800 : {
1801 3935132 : cl = dlist_container(CatCList, cache_elem, iter.cur);
1802 :
1803 3935132 : if (cl->dead)
1804 0 : continue; /* ignore dead entries */
1805 :
1806 3935132 : if (cl->hash_value != lHashValue)
1807 388840 : continue; /* quickly skip entry if wrong hash val */
1808 :
1809 : /*
1810 : * see if the cached list matches our key.
1811 : */
1812 3546292 : if (cl->nkeys != nkeys)
1813 0 : continue;
1814 :
1815 3546292 : if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments))
1816 0 : continue;
1817 :
1818 : /*
1819 : * We found a matching list. Move the list to the front of the list
1820 : * for its hashbucket, so as to speed subsequent searches. (We do not
1821 : * move the members to the fronts of their hashbucket lists, however,
1822 : * since there's no point in that unless they are searched for
1823 : * individually.)
1824 : */
1825 3546292 : dlist_move_head(lbucket, &cl->cache_elem);
1826 :
1827 : /* Bump the list's refcount and return it */
1828 3546292 : ResourceOwnerEnlarge(CurrentResourceOwner);
1829 3546292 : cl->refcount++;
1830 3546292 : ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1831 :
1832 : CACHE_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1833 : cache->cc_relname);
1834 :
1835 : #ifdef CATCACHE_STATS
1836 : cache->cc_lhits++;
1837 : #endif
1838 :
1839 3546292 : return cl;
1840 : }
1841 :
1842 : /*
1843 : * List was not found in cache, so we have to build it by reading the
1844 : * relation. For each matching tuple found in the relation, use an
1845 : * existing cache entry if possible, else build a new one.
1846 : *
1847 : * We have to bump the member refcounts temporarily to ensure they won't
1848 : * get dropped from the cache while loading other members. We use a PG_TRY
1849 : * block to ensure we can undo those refcounts if we get an error before
1850 : * we finish constructing the CatCList. ctlist must be valid throughout
1851 : * the PG_TRY block.
1852 : */
1853 321708 : ctlist = NIL;
1854 :
1855 : /*
1856 : * Cache invalidation can happen while we're building the list.
1857 : * CatalogCacheCreateEntry() handles concurrent invalidation of individual
1858 : * tuples, but it's also possible that a new entry is concurrently added
1859 : * that should be part of the list we're building. Register an
1860 : * "in-progress" entry that will receive the invalidation, until we have
1861 : * built the final list entry.
1862 : */
1863 321708 : save_in_progress = catcache_in_progress_stack;
1864 321708 : in_progress_ent.next = catcache_in_progress_stack;
1865 321708 : in_progress_ent.cache = cache;
1866 321708 : in_progress_ent.hash_value = lHashValue;
1867 321708 : in_progress_ent.list = true;
1868 321708 : in_progress_ent.dead = false;
1869 321708 : catcache_in_progress_stack = &in_progress_ent;
1870 :
1871 321708 : PG_TRY();
1872 : {
1873 : ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1874 : Relation relation;
1875 : SysScanDesc scandesc;
1876 321708 : bool first_iter = true;
1877 :
1878 321708 : relation = table_open(cache->cc_reloid, AccessShareLock);
1879 :
1880 : /*
1881 : * Ok, need to make a lookup in the relation, copy the scankey and
1882 : * fill out any per-call fields.
1883 : */
1884 321708 : memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
1885 321708 : cur_skey[0].sk_argument = v1;
1886 321708 : cur_skey[1].sk_argument = v2;
1887 321708 : cur_skey[2].sk_argument = v3;
1888 321708 : cur_skey[3].sk_argument = v4;
1889 :
1890 : /*
1891 : * Scan the table for matching entries. If an invalidation arrives
1892 : * mid-build, we will loop back here to retry.
1893 : */
1894 : do
1895 : {
1896 : /*
1897 : * If we are retrying, release refcounts on any items created on
1898 : * the previous iteration. We dare not try to free them if
1899 : * they're now unreferenced, since an error while doing that would
1900 : * result in the PG_CATCH below doing extra refcount decrements.
1901 : * Besides, we'll likely re-adopt those items in the next
1902 : * iteration, so it's not worth complicating matters to try to get
1903 : * rid of them.
1904 : */
1905 321722 : foreach(ctlist_item, ctlist)
1906 : {
1907 2 : ct = (CatCTup *) lfirst(ctlist_item);
1908 : Assert(ct->c_list == NULL);
1909 : Assert(ct->refcount > 0);
1910 2 : ct->refcount--;
1911 : }
1912 : /* Reset ctlist in preparation for new try */
1913 321720 : ctlist = NIL;
1914 321720 : in_progress_ent.dead = false;
1915 :
1916 643440 : scandesc = systable_beginscan(relation,
1917 : cache->cc_indexoid,
1918 321720 : IndexScanOK(cache),
1919 : NULL,
1920 : nkeys,
1921 : cur_skey);
1922 :
1923 : /* The list will be ordered iff we are doing an index scan */
1924 321720 : ordered = (scandesc->irel != NULL);
1925 :
1926 : /* Injection point to help testing the recursive invalidation case */
1927 321720 : if (first_iter)
1928 : {
1929 321708 : INJECTION_POINT("catcache-list-miss-systable-scan-started");
1930 321708 : first_iter = false;
1931 : }
1932 :
1933 1313146 : while (HeapTupleIsValid(ntp = systable_getnext(scandesc)) &&
1934 991436 : !in_progress_ent.dead)
1935 : {
1936 : uint32 hashValue;
1937 : Index hashIndex;
1938 991426 : bool found = false;
1939 : dlist_head *bucket;
1940 :
1941 : /*
1942 : * See if there's an entry for this tuple already.
1943 : */
1944 991426 : ct = NULL;
1945 991426 : hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
1946 991426 : hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1947 :
1948 991426 : bucket = &cache->cc_bucket[hashIndex];
1949 1366826 : dlist_foreach(iter, bucket)
1950 : {
1951 515172 : ct = dlist_container(CatCTup, cache_elem, iter.cur);
1952 :
1953 515172 : if (ct->dead || ct->negative)
1954 994 : continue; /* ignore dead and negative entries */
1955 :
1956 514178 : if (ct->hash_value != hashValue)
1957 355162 : continue; /* quickly skip entry if wrong hash val */
1958 :
1959 159016 : if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1960 0 : continue; /* not same tuple */
1961 :
1962 : /*
1963 : * Found a match, but can't use it if it belongs to
1964 : * another list already
1965 : */
1966 159016 : if (ct->c_list)
1967 19244 : continue;
1968 :
1969 139772 : found = true;
1970 139772 : break; /* A-OK */
1971 : }
1972 :
1973 991426 : if (!found)
1974 : {
1975 : /* We didn't find a usable entry, so make a new one */
1976 851654 : ct = CatalogCacheCreateEntry(cache, ntp, NULL,
1977 : hashValue, hashIndex);
1978 :
1979 : /* upon failure, we must start the scan over */
1980 851654 : if (ct == NULL)
1981 : {
1982 0 : in_progress_ent.dead = true;
1983 0 : break;
1984 : }
1985 : }
1986 :
1987 : /* Careful here: add entry to ctlist, then bump its refcount */
1988 : /* This way leaves state correct if lappend runs out of memory */
1989 991426 : ctlist = lappend(ctlist, ct);
1990 991426 : ct->refcount++;
1991 : }
1992 :
1993 321720 : systable_endscan(scandesc);
1994 321720 : } while (in_progress_ent.dead);
1995 :
1996 321708 : table_close(relation, AccessShareLock);
1997 :
1998 : /* Make sure the resource owner has room to remember this entry. */
1999 321708 : ResourceOwnerEnlarge(CurrentResourceOwner);
2000 :
2001 : /* Now we can build the CatCList entry. */
2002 321708 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2003 321708 : nmembers = list_length(ctlist);
2004 : cl = (CatCList *)
2005 321708 : palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
2006 :
2007 : /* Extract key values */
2008 321708 : CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno,
2009 321708 : arguments, cl->keys);
2010 321708 : MemoryContextSwitchTo(oldcxt);
2011 :
2012 : /*
2013 : * We are now past the last thing that could trigger an elog before we
2014 : * have finished building the CatCList and remembering it in the
2015 : * resource owner. So it's OK to fall out of the PG_TRY, and indeed
2016 : * we'd better do so before we start marking the members as belonging
2017 : * to the list.
2018 : */
2019 : }
2020 0 : PG_CATCH();
2021 : {
2022 : Assert(catcache_in_progress_stack == &in_progress_ent);
2023 0 : catcache_in_progress_stack = save_in_progress;
2024 :
2025 0 : foreach(ctlist_item, ctlist)
2026 : {
2027 0 : ct = (CatCTup *) lfirst(ctlist_item);
2028 : Assert(ct->c_list == NULL);
2029 : Assert(ct->refcount > 0);
2030 0 : ct->refcount--;
2031 0 : if (
2032 : #ifndef CATCACHE_FORCE_RELEASE
2033 0 : ct->dead &&
2034 : #endif
2035 0 : ct->refcount == 0 &&
2036 0 : (ct->c_list == NULL || ct->c_list->refcount == 0))
2037 0 : CatCacheRemoveCTup(cache, ct);
2038 : }
2039 :
2040 0 : PG_RE_THROW();
2041 : }
2042 321708 : PG_END_TRY();
2043 : Assert(catcache_in_progress_stack == &in_progress_ent);
2044 321708 : catcache_in_progress_stack = save_in_progress;
2045 :
2046 321708 : cl->cl_magic = CL_MAGIC;
2047 321708 : cl->my_cache = cache;
2048 321708 : cl->refcount = 0; /* for the moment */
2049 321708 : cl->dead = false;
2050 321708 : cl->ordered = ordered;
2051 321708 : cl->nkeys = nkeys;
2052 321708 : cl->hash_value = lHashValue;
2053 321708 : cl->n_members = nmembers;
2054 :
2055 321708 : i = 0;
2056 1313132 : foreach(ctlist_item, ctlist)
2057 : {
2058 991424 : cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
2059 : Assert(ct->c_list == NULL);
2060 991424 : ct->c_list = cl;
2061 : /* release the temporary refcount on the member */
2062 : Assert(ct->refcount > 0);
2063 991424 : ct->refcount--;
2064 : /* mark list dead if any members already dead */
2065 991424 : if (ct->dead)
2066 0 : cl->dead = true;
2067 : }
2068 : Assert(i == nmembers);
2069 :
2070 : /*
2071 : * Add the CatCList to the appropriate bucket, and count it.
2072 : */
2073 321708 : dlist_push_head(lbucket, &cl->cache_elem);
2074 :
2075 321708 : cache->cc_nlist++;
2076 :
2077 : /* Finally, bump the list's refcount and return it */
2078 321708 : cl->refcount++;
2079 321708 : ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
2080 :
2081 : CACHE_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
2082 : cache->cc_relname, nmembers);
2083 :
2084 321708 : return cl;
2085 : }
2086 :
2087 : /*
2088 : * ReleaseCatCacheList
2089 : *
2090 : * Decrement the reference count of a catcache list.
2091 : */
void
ReleaseCatCacheList(CatCList *list)
{
	/* Drop the caller's reference, charged to the current resource owner. */
	ReleaseCatCacheListWithOwner(list, CurrentResourceOwner);
}
2097 :
2098 : static void
2099 3868000 : ReleaseCatCacheListWithOwner(CatCList *list, ResourceOwner resowner)
2100 : {
2101 : /* Safety checks to ensure we were handed a cache entry */
2102 : Assert(list->cl_magic == CL_MAGIC);
2103 : Assert(list->refcount > 0);
2104 3868000 : list->refcount--;
2105 3868000 : if (resowner)
2106 3867964 : ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
2107 :
2108 3868000 : if (
2109 : #ifndef CATCACHE_FORCE_RELEASE
2110 3868000 : list->dead &&
2111 : #endif
2112 6 : list->refcount == 0)
2113 6 : CatCacheRemoveCList(list->my_cache, list);
2114 3868000 : }
2115 :
2116 :
2117 : /*
2118 : * CatalogCacheCreateEntry
2119 : * Create a new CatCTup entry, copying the given HeapTuple and other
2120 : * supplied data into it. The new entry initially has refcount 0.
2121 : *
2122 : * To create a normal cache entry, ntp must be the HeapTuple just fetched
2123 : * from scandesc, and "arguments" is not used. To create a negative cache
2124 : * entry, pass NULL for ntp; then "arguments" is the cache keys to use.
2125 : * In either case, hashValue/hashIndex are the hash values computed from
2126 : * the cache keys.
2127 : *
2128 : * Returns NULL if we attempt to detoast the tuple and observe that it
2129 : * became stale. (This cannot happen for a negative entry.) Caller must
2130 : * retry the tuple lookup in that case.
2131 : */
static CatCTup *
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
						uint32 hashValue, Index hashIndex)
{
	CatCTup    *ct;
	MemoryContext oldcxt;

	if (ntp)
	{
		int			i;
		HeapTuple	dtp = NULL;

		/*
		 * The invalidation of the in-progress entry essentially never happens
		 * during our regression tests, and there's no easy way to force it to
		 * fail for testing purposes. To ensure we have test coverage for the
		 * retry paths in our callers, make debug builds randomly fail about
		 * 0.1% of the times through this code path, even when there's no
		 * toasted fields.
		 */
#ifdef USE_ASSERT_CHECKING
		if (pg_prng_uint32(&pg_global_prng_state) <= (PG_UINT32_MAX / 1000))
			return NULL;
#endif

		/*
		 * If there are any out-of-line toasted fields in the tuple, expand
		 * them in-line. This saves cycles during later use of the catcache
		 * entry, and also protects us against the possibility of the toast
		 * tuples being freed before we attempt to fetch them, in case of
		 * something using a slightly stale catcache entry.
		 */
		if (HeapTupleHasExternal(ntp))
		{
			CatCInProgress *save_in_progress;
			CatCInProgress in_progress_ent;

			/*
			 * The tuple could become stale while we are doing toast table
			 * access (since AcceptInvalidationMessages can run then). The
			 * invalidation will mark our in-progress entry as dead.  Push an
			 * entry onto the in-progress stack so such an invalidation can
			 * find us.
			 */
			save_in_progress = catcache_in_progress_stack;
			in_progress_ent.next = catcache_in_progress_stack;
			in_progress_ent.cache = cache;
			in_progress_ent.hash_value = hashValue;
			in_progress_ent.list = false;
			in_progress_ent.dead = false;
			catcache_in_progress_stack = &in_progress_ent;

			PG_TRY();
			{
				dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
			}
			PG_FINALLY();
			{
				/* Pop our in-progress entry whether or not an error occurred */
				Assert(catcache_in_progress_stack == &in_progress_ent);
				catcache_in_progress_stack = save_in_progress;
			}
			PG_END_TRY();

			if (in_progress_ent.dead)
			{
				/* Tuple went stale during toast access; caller must retry */
				heap_freetuple(dtp);
				return NULL;
			}
		}
		else
			dtp = ntp;

		/* Allocate memory for CatCTup and the cached tuple in one go */
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

		/* extra MAXIMUM_ALIGNOF allows aligning the tuple data below */
		ct = (CatCTup *) palloc(sizeof(CatCTup) +
								MAXIMUM_ALIGNOF + dtp->t_len);
		ct->tuple.t_len = dtp->t_len;
		ct->tuple.t_self = dtp->t_self;
		ct->tuple.t_tableOid = dtp->t_tableOid;
		ct->tuple.t_data = (HeapTupleHeader)
			MAXALIGN(((char *) ct) + sizeof(CatCTup));
		/* copy tuple contents */
		memcpy((char *) ct->tuple.t_data,
			   (const char *) dtp->t_data,
			   dtp->t_len);
		MemoryContextSwitchTo(oldcxt);

		/* if we flattened the tuple above, free the temporary copy */
		if (dtp != ntp)
			heap_freetuple(dtp);

		/* extract keys - they'll point into the tuple if not by-value */
		for (i = 0; i < cache->cc_nkeys; i++)
		{
			Datum		atp;
			bool		isnull;

			atp = heap_getattr(&ct->tuple,
							   cache->cc_keyno[i],
							   cache->cc_tupdesc,
							   &isnull);
			Assert(!isnull);
			ct->keys[i] = atp;
		}
	}
	else
	{
		/* Set up keys for a negative cache entry */
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
		ct = (CatCTup *) palloc(sizeof(CatCTup));

		/*
		 * Store keys - they'll point into separately allocated memory if not
		 * by-value.
		 */
		CatCacheCopyKeys(cache->cc_tupdesc, cache->cc_nkeys, cache->cc_keyno,
						 arguments, ct->keys);
		MemoryContextSwitchTo(oldcxt);
	}

	/*
	 * Finish initializing the CatCTup header, and add it to the cache's
	 * linked list and counts.
	 */
	ct->ct_magic = CT_MAGIC;
	ct->my_cache = cache;
	ct->c_list = NULL;
	ct->refcount = 0;			/* for the moment */
	ct->dead = false;
	ct->negative = (ntp == NULL);
	ct->hash_value = hashValue;

	dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);

	cache->cc_ntup++;
	CacheHdr->ch_ntup++;

	/*
	 * If the hash table has become too full, enlarge the buckets array. Quite
	 * arbitrarily, we enlarge when fill factor > 2.
	 */
	if (cache->cc_ntup > cache->cc_nbuckets * 2)
		RehashCatCache(cache);

	return ct;
}
2276 :
2277 : /*
2278 : * Helper routine that frees keys stored in the keys array.
2279 : */
2280 : static void
2281 582168 : CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
2282 : {
2283 : int i;
2284 :
2285 1793796 : for (i = 0; i < nkeys; i++)
2286 : {
2287 1211628 : int attnum = attnos[i];
2288 : Form_pg_attribute att;
2289 :
2290 : /* system attribute are not supported in caches */
2291 : Assert(attnum > 0);
2292 :
2293 1211628 : att = TupleDescAttr(tupdesc, attnum - 1);
2294 :
2295 1211628 : if (!att->attbyval)
2296 511492 : pfree(DatumGetPointer(keys[i]));
2297 : }
2298 582168 : }
2299 :
2300 : /*
2301 : * Helper routine that copies the keys in the srckeys array into the dstkeys
2302 : * one, guaranteeing that the datums are fully allocated in the current memory
2303 : * context.
2304 : */
2305 : static void
2306 2049308 : CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
2307 : Datum *srckeys, Datum *dstkeys)
2308 : {
2309 : int i;
2310 :
2311 : /*
2312 : * XXX: memory and lookup performance could possibly be improved by
2313 : * storing all keys in one allocation.
2314 : */
2315 :
2316 6415942 : for (i = 0; i < nkeys; i++)
2317 : {
2318 4366634 : int attnum = attnos[i];
2319 4366634 : Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
2320 4366634 : Datum src = srckeys[i];
2321 : NameData srcname;
2322 :
2323 : /*
2324 : * Must be careful in case the caller passed a C string where a NAME
2325 : * is wanted: convert the given argument to a correctly padded NAME.
2326 : * Otherwise the memcpy() done by datumCopy() could fall off the end
2327 : * of memory.
2328 : */
2329 4366634 : if (att->atttypid == NAMEOID)
2330 : {
2331 868214 : namestrcpy(&srcname, DatumGetCString(src));
2332 868214 : src = NameGetDatum(&srcname);
2333 : }
2334 :
2335 4366634 : dstkeys[i] = datumCopy(src,
2336 4366634 : att->attbyval,
2337 4366634 : att->attlen);
2338 : }
2339 2049308 : }
2340 :
2341 : /*
2342 : * PrepareToInvalidateCacheTuple()
2343 : *
2344 : * This is part of a rather subtle chain of events, so pay attention:
2345 : *
2346 : * When a tuple is inserted or deleted, it cannot be flushed from the
2347 : * catcaches immediately, for reasons explained at the top of cache/inval.c.
2348 : * Instead we have to add entry(s) for the tuple to a list of pending tuple
2349 : * invalidations that will be done at the end of the command or transaction.
2350 : *
2351 : * The lists of tuples that need to be flushed are kept by inval.c. This
2352 : * routine is a helper routine for inval.c. Given a tuple belonging to
2353 : * the specified relation, find all catcaches it could be in, compute the
2354 : * correct hash value for each such catcache, and call the specified
2355 : * function to record the cache id and hash value in inval.c's lists.
2356 : * SysCacheInvalidate will be called later, if appropriate,
2357 : * using the recorded information.
2358 : *
2359 : * For an insert or delete, tuple is the target tuple and newtuple is NULL.
2360 : * For an update, we are called just once, with tuple being the old tuple
2361 : * version and newtuple the new version. We should make two list entries
2362 : * if the tuple's hash value changed, but only one if it didn't.
2363 : *
2364 : * Note that it is irrelevant whether the given tuple is actually loaded
2365 : * into the catcache at the moment. Even if it's not there now, it might
2366 : * be by the end of the command, or there might be a matching negative entry
2367 : * to flush --- or other backends' caches might have such entries --- so
2368 : * we have to make list entries to flush it later.
2369 : *
2370 : * Also note that it's not an error if there are no catcaches for the
2371 : * specified relation. inval.c doesn't know exactly which rels have
2372 : * catcaches --- it will call this routine for any tuple that's in a
2373 : * system relation.
2374 : */
2375 : void
2376 3140172 : PrepareToInvalidateCacheTuple(Relation relation,
2377 : HeapTuple tuple,
2378 : HeapTuple newtuple,
2379 : void (*function) (int, uint32, Oid, void *),
2380 : void *context)
2381 : {
2382 : slist_iter iter;
2383 : Oid reloid;
2384 :
2385 : CACHE_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
2386 :
2387 : /*
2388 : * sanity checks
2389 : */
2390 : Assert(RelationIsValid(relation));
2391 : Assert(HeapTupleIsValid(tuple));
2392 : Assert(PointerIsValid(function));
2393 : Assert(CacheHdr != NULL);
2394 :
2395 3140172 : reloid = RelationGetRelid(relation);
2396 :
2397 : /* ----------------
2398 : * for each cache
2399 : * if the cache contains tuples from the specified relation
2400 : * compute the tuple's hash value(s) in this cache,
2401 : * and call the passed function to register the information.
2402 : * ----------------
2403 : */
2404 :
2405 270054792 : slist_foreach(iter, &CacheHdr->ch_caches)
2406 : {
2407 266914620 : CatCache *ccp = slist_container(CatCache, cc_next, iter.cur);
2408 : uint32 hashvalue;
2409 : Oid dbid;
2410 :
2411 266914620 : if (ccp->cc_reloid != reloid)
2412 261193626 : continue;
2413 :
2414 : /* Just in case cache hasn't finished initialization yet... */
2415 5720994 : ConditionalCatalogCacheInitializeCache(ccp);
2416 :
2417 5720994 : hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
2418 5720994 : dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
2419 :
2420 5720994 : (*function) (ccp->id, hashvalue, dbid, context);
2421 :
2422 5720994 : if (newtuple)
2423 : {
2424 : uint32 newhashvalue;
2425 :
2426 430780 : newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
2427 :
2428 430780 : if (newhashvalue != hashvalue)
2429 6222 : (*function) (ccp->id, newhashvalue, dbid, context);
2430 : }
2431 : }
2432 3140172 : }
2433 :
2434 : /* ResourceOwner callbacks */
2435 :
static void
ResOwnerReleaseCatCache(Datum res)
{
	/*
	 * ResourceOwner release callback: the owner is already discarding its
	 * record of this reference, so pass NULL to skip the Forget call.
	 */
	ReleaseCatCacheWithOwner((HeapTuple) DatumGetPointer(res), NULL);
}
2441 :
2442 : static char *
2443 0 : ResOwnerPrintCatCache(Datum res)
2444 : {
2445 0 : HeapTuple tuple = (HeapTuple) DatumGetPointer(res);
2446 0 : CatCTup *ct = (CatCTup *) (((char *) tuple) -
2447 : offsetof(CatCTup, tuple));
2448 :
2449 : /* Safety check to ensure we were handed a cache entry */
2450 : Assert(ct->ct_magic == CT_MAGIC);
2451 :
2452 0 : return psprintf("cache %s (%d), tuple %u/%u has count %d",
2453 0 : ct->my_cache->cc_relname, ct->my_cache->id,
2454 0 : ItemPointerGetBlockNumber(&(tuple->t_self)),
2455 0 : ItemPointerGetOffsetNumber(&(tuple->t_self)),
2456 : ct->refcount);
2457 : }
2458 :
static void
ResOwnerReleaseCatCacheList(Datum res)
{
	/*
	 * ResourceOwner release callback: the owner is already discarding its
	 * record of this reference, so pass NULL to skip the Forget call.
	 */
	ReleaseCatCacheListWithOwner((CatCList *) DatumGetPointer(res), NULL);
}
2464 :
2465 : static char *
2466 0 : ResOwnerPrintCatCacheList(Datum res)
2467 : {
2468 0 : CatCList *list = (CatCList *) DatumGetPointer(res);
2469 :
2470 0 : return psprintf("cache %s (%d), list %p has count %d",
2471 0 : list->my_cache->cc_relname, list->my_cache->id,
2472 : list, list->refcount);
2473 : }
|