Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * catcache.c
4 : * System catalog cache for tuples matching a key.
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/utils/cache/catcache.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : #include "postgres.h"
16 :
17 : #include "access/genam.h"
18 : #include "access/heaptoast.h"
19 : #include "access/relscan.h"
20 : #include "access/table.h"
21 : #include "access/xact.h"
22 : #include "catalog/catalog.h"
23 : #include "catalog/pg_collation.h"
24 : #include "catalog/pg_type.h"
25 : #include "common/hashfn.h"
26 : #include "common/pg_prng.h"
27 : #include "miscadmin.h"
28 : #include "port/pg_bitutils.h"
29 : #ifdef CATCACHE_STATS
30 : #include "storage/ipc.h" /* for on_proc_exit */
31 : #endif
32 : #include "storage/lmgr.h"
33 : #include "utils/builtins.h"
34 : #include "utils/catcache.h"
35 : #include "utils/datum.h"
36 : #include "utils/fmgroids.h"
37 : #include "utils/injection_point.h"
38 : #include "utils/inval.h"
39 : #include "utils/memutils.h"
40 : #include "utils/rel.h"
41 : #include "utils/resowner.h"
42 : #include "utils/syscache.h"
43 :
/*
 * If a catcache invalidation is processed while we are in the middle of
 * creating a catcache entry (or list), it might apply to the entry we're
 * creating, making it invalid before it's been inserted to the catcache.  To
 * catch such cases, we have a stack of "create-in-progress" entries.  Cache
 * invalidation marks any matching entries in the stack as dead, in addition
 * to the actual CatCTup and CatCList entries.
 */
typedef struct CatCInProgress
{
	CatCache   *cache;			/* cache that the entry belongs to */
	uint32		hash_value;		/* hash of the entry; ignored for lists */
	bool		list;			/* is it a list entry? */
	bool		dead;			/* set when the entry is invalidated */
	struct CatCInProgress *next;	/* next older in-progress entry */
} CatCInProgress;

/* Top of the stack of in-progress entry builds; NULL when none are active. */
static CatCInProgress *catcache_in_progress_stack = NULL;
62 :
63 : /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
64 :
65 : /*
66 : * Given a hash value and the size of the hash table, find the bucket
67 : * in which the hash value belongs. Since the hash table must contain
68 : * a power-of-2 number of elements, this is a simple bitmask.
69 : */
70 : #define HASH_INDEX(h, sz) ((Index) ((h) & ((sz) - 1)))
71 :
72 :
73 : /*
74 : * variables, macros and other stuff
75 : */
76 :
77 : #ifdef CACHEDEBUG
78 : #define CACHE_elog(...) elog(__VA_ARGS__)
79 : #else
80 : #define CACHE_elog(...)
81 : #endif
82 :
83 : /* Cache management header --- pointer is NULL until created */
84 : static CatCacheHeader *CacheHdr = NULL;
85 :
86 : static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
87 : int nkeys,
88 : Datum v1, Datum v2,
89 : Datum v3, Datum v4);
90 :
91 : static pg_noinline HeapTuple SearchCatCacheMiss(CatCache *cache,
92 : int nkeys,
93 : uint32 hashValue,
94 : Index hashIndex,
95 : Datum v1, Datum v2,
96 : Datum v3, Datum v4);
97 :
98 : static uint32 CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
99 : Datum v1, Datum v2, Datum v3, Datum v4);
100 : static uint32 CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys,
101 : HeapTuple tuple);
102 : static inline bool CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
103 : const Datum *cachekeys,
104 : const Datum *searchkeys);
105 :
106 : #ifdef CATCACHE_STATS
107 : static void CatCachePrintStats(int code, Datum arg);
108 : #endif
109 : static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
110 : static void CatCacheRemoveCList(CatCache *cache, CatCList *cl);
111 : static void RehashCatCache(CatCache *cp);
112 : static void RehashCatCacheLists(CatCache *cp);
113 : static void CatalogCacheInitializeCache(CatCache *cache);
114 : static CatCTup *CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
115 : Datum *arguments,
116 : uint32 hashValue, Index hashIndex);
117 :
118 : static void ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner);
119 : static void ReleaseCatCacheListWithOwner(CatCList *list, ResourceOwner resowner);
120 : static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
121 : Datum *keys);
122 : static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
123 : Datum *srckeys, Datum *dstkeys);
124 :
125 :
126 : /*
127 : * internal support functions
128 : */
129 :
130 : /* ResourceOwner callbacks to hold catcache references */
131 :
132 : static void ResOwnerReleaseCatCache(Datum res);
133 : static char *ResOwnerPrintCatCache(Datum res);
134 : static void ResOwnerReleaseCatCacheList(Datum res);
135 : static char *ResOwnerPrintCatCacheList(Datum res);
136 :
/*
 * ResourceOwner descriptor for individual catcache tuple references.
 * Remembered references are released in the RESOURCE_RELEASE_AFTER_LOCKS
 * phase.
 */
static const ResourceOwnerDesc catcache_resowner_desc =
{
	/* catcache references */
	.name = "catcache reference",
	.release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
	.release_priority = RELEASE_PRIO_CATCACHE_REFS,
	.ReleaseResource = ResOwnerReleaseCatCache,
	.DebugPrint = ResOwnerPrintCatCache
};

/* ResourceOwner descriptor for catcache list (CatCList) pins. */
static const ResourceOwnerDesc catlistref_resowner_desc =
{
	/* catcache-list pins */
	.name = "catcache list reference",
	.release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
	.release_priority = RELEASE_PRIO_CATCACHE_LIST_REFS,
	.ReleaseResource = ResOwnerReleaseCatCacheList,
	.DebugPrint = ResOwnerPrintCatCacheList
};
156 :
/* Convenience wrappers over ResourceOwnerRemember/Forget */

/* Record that 'owner' holds a reference to catcache tuple 'tuple'. */
static inline void
ResourceOwnerRememberCatCacheRef(ResourceOwner owner, HeapTuple tuple)
{
	ResourceOwnerRemember(owner, PointerGetDatum(tuple), &catcache_resowner_desc);
}

/* Drop a previously-remembered catcache tuple reference from 'owner'. */
static inline void
ResourceOwnerForgetCatCacheRef(ResourceOwner owner, HeapTuple tuple)
{
	ResourceOwnerForget(owner, PointerGetDatum(tuple), &catcache_resowner_desc);
}

/* Record that 'owner' holds a pin on catcache list 'list'. */
static inline void
ResourceOwnerRememberCatCacheListRef(ResourceOwner owner, CatCList *list)
{
	ResourceOwnerRemember(owner, PointerGetDatum(list), &catlistref_resowner_desc);
}

/* Drop a previously-remembered catcache list pin from 'owner'. */
static inline void
ResourceOwnerForgetCatCacheListRef(ResourceOwner owner, CatCList *list)
{
	ResourceOwnerForget(owner, PointerGetDatum(list), &catlistref_resowner_desc);
}
178 :
179 :
180 : /*
181 : * Hash and equality functions for system types that are used as cache key
182 : * fields. In some cases, we just call the regular SQL-callable functions for
183 : * the appropriate data type, but that tends to be a little slow, and the
184 : * speed of these functions is performance-critical. Therefore, for data
185 : * types that frequently occur as catcache keys, we hard-code the logic here.
186 : * Avoiding the overhead of DirectFunctionCallN(...) is a substantial win, and
187 : * in certain cases (like int4) we can adopt a faster hash algorithm as well.
188 : */
189 :
190 : static bool
191 5693888 : chareqfast(Datum a, Datum b)
192 : {
193 5693888 : return DatumGetChar(a) == DatumGetChar(b);
194 : }
195 :
196 : static uint32
197 6521254 : charhashfast(Datum datum)
198 : {
199 6521254 : return murmurhash32((int32) DatumGetChar(datum));
200 : }
201 :
202 : static bool
203 3950002 : nameeqfast(Datum a, Datum b)
204 : {
205 3950002 : char *ca = NameStr(*DatumGetName(a));
206 3950002 : char *cb = NameStr(*DatumGetName(b));
207 :
208 3950002 : return strncmp(ca, cb, NAMEDATALEN) == 0;
209 : }
210 :
211 : static uint32
212 9093968 : namehashfast(Datum datum)
213 : {
214 9093968 : char *key = NameStr(*DatumGetName(datum));
215 :
216 9093968 : return hash_any((unsigned char *) key, strlen(key));
217 : }
218 :
219 : static bool
220 8935284 : int2eqfast(Datum a, Datum b)
221 : {
222 8935284 : return DatumGetInt16(a) == DatumGetInt16(b);
223 : }
224 :
225 : static uint32
226 12516316 : int2hashfast(Datum datum)
227 : {
228 12516316 : return murmurhash32((int32) DatumGetInt16(datum));
229 : }
230 :
231 : static bool
232 103051688 : int4eqfast(Datum a, Datum b)
233 : {
234 103051688 : return DatumGetInt32(a) == DatumGetInt32(b);
235 : }
236 :
237 : static uint32
238 121060550 : int4hashfast(Datum datum)
239 : {
240 121060550 : return murmurhash32((int32) DatumGetInt32(datum));
241 : }
242 :
243 : static bool
244 166 : texteqfast(Datum a, Datum b)
245 : {
246 : /*
247 : * The use of DEFAULT_COLLATION_OID is fairly arbitrary here. We just
248 : * want to take the fast "deterministic" path in texteq().
249 : */
250 166 : return DatumGetBool(DirectFunctionCall2Coll(texteq, DEFAULT_COLLATION_OID, a, b));
251 : }
252 :
253 : static uint32
254 3768 : texthashfast(Datum datum)
255 : {
256 : /* analogously here as in texteqfast() */
257 3768 : return DatumGetInt32(DirectFunctionCall1Coll(hashtext, DEFAULT_COLLATION_OID, datum));
258 : }
259 :
260 : static bool
261 3106 : oidvectoreqfast(Datum a, Datum b)
262 : {
263 3106 : return DatumGetBool(DirectFunctionCall2(oidvectoreq, a, b));
264 : }
265 :
266 : static uint32
267 402558 : oidvectorhashfast(Datum datum)
268 : {
269 402558 : return DatumGetInt32(DirectFunctionCall1(hashoidvector, datum));
270 : }
271 :
272 : /* Lookup support functions for a type. */
273 : static void
274 1254986 : GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEqualFN *fasteqfunc)
275 : {
276 1254986 : switch (keytype)
277 : {
278 16626 : case BOOLOID:
279 16626 : *hashfunc = charhashfast;
280 16626 : *fasteqfunc = chareqfast;
281 16626 : *eqfunc = F_BOOLEQ;
282 16626 : break;
283 21316 : case CHAROID:
284 21316 : *hashfunc = charhashfast;
285 21316 : *fasteqfunc = chareqfast;
286 21316 : *eqfunc = F_CHAREQ;
287 21316 : break;
288 233454 : case NAMEOID:
289 233454 : *hashfunc = namehashfast;
290 233454 : *fasteqfunc = nameeqfast;
291 233454 : *eqfunc = F_NAMEEQ;
292 233454 : break;
293 70060 : case INT2OID:
294 70060 : *hashfunc = int2hashfast;
295 70060 : *fasteqfunc = int2eqfast;
296 70060 : *eqfunc = F_INT2EQ;
297 70060 : break;
298 20752 : case INT4OID:
299 20752 : *hashfunc = int4hashfast;
300 20752 : *fasteqfunc = int4eqfast;
301 20752 : *eqfunc = F_INT4EQ;
302 20752 : break;
303 9182 : case TEXTOID:
304 9182 : *hashfunc = texthashfast;
305 9182 : *fasteqfunc = texteqfast;
306 9182 : *eqfunc = F_TEXTEQ;
307 9182 : break;
308 867150 : case OIDOID:
309 : case REGPROCOID:
310 : case REGPROCEDUREOID:
311 : case REGOPEROID:
312 : case REGOPERATOROID:
313 : case REGCLASSOID:
314 : case REGTYPEOID:
315 : case REGCOLLATIONOID:
316 : case REGCONFIGOID:
317 : case REGDICTIONARYOID:
318 : case REGROLEOID:
319 : case REGNAMESPACEOID:
320 : case REGDATABASEOID:
321 867150 : *hashfunc = int4hashfast;
322 867150 : *fasteqfunc = int4eqfast;
323 867150 : *eqfunc = F_OIDEQ;
324 867150 : break;
325 16446 : case OIDVECTOROID:
326 16446 : *hashfunc = oidvectorhashfast;
327 16446 : *fasteqfunc = oidvectoreqfast;
328 16446 : *eqfunc = F_OIDVECTOREQ;
329 16446 : break;
330 0 : default:
331 0 : elog(FATAL, "type %u not supported as catcache key", keytype);
332 : *hashfunc = NULL; /* keep compiler quiet */
333 :
334 : *eqfunc = InvalidOid;
335 : break;
336 : }
337 1254986 : }
338 :
339 : /*
340 : * CatalogCacheComputeHashValue
341 : *
342 : * Compute the hash value associated with a given set of lookup keys
343 : */
344 : static uint32
345 107147556 : CatalogCacheComputeHashValue(CatCache *cache, int nkeys,
346 : Datum v1, Datum v2, Datum v3, Datum v4)
347 : {
348 107147556 : uint32 hashValue = 0;
349 : uint32 oneHash;
350 107147556 : CCHashFN *cc_hashfunc = cache->cc_hashfunc;
351 :
352 : CACHE_elog(DEBUG2, "CatalogCacheComputeHashValue %s %d %p",
353 : cache->cc_relname, nkeys, cache);
354 :
355 107147556 : switch (nkeys)
356 : {
357 4856032 : case 4:
358 4856032 : oneHash = (cc_hashfunc[3]) (v4);
359 4856032 : hashValue ^= pg_rotate_left32(oneHash, 24);
360 : /* FALLTHROUGH */
361 12221768 : case 3:
362 12221768 : oneHash = (cc_hashfunc[2]) (v3);
363 12221768 : hashValue ^= pg_rotate_left32(oneHash, 16);
364 : /* FALLTHROUGH */
365 25386050 : case 2:
366 25386050 : oneHash = (cc_hashfunc[1]) (v2);
367 25386050 : hashValue ^= pg_rotate_left32(oneHash, 8);
368 : /* FALLTHROUGH */
369 107147556 : case 1:
370 107147556 : oneHash = (cc_hashfunc[0]) (v1);
371 107147556 : hashValue ^= oneHash;
372 107147556 : break;
373 0 : default:
374 0 : elog(FATAL, "wrong number of hash keys: %d", nkeys);
375 : break;
376 : }
377 :
378 107147556 : return hashValue;
379 : }
380 :
381 : /*
382 : * CatalogCacheComputeTupleHashValue
383 : *
384 : * Compute the hash value associated with a given tuple to be cached
385 : */
386 : static uint32
387 7400912 : CatalogCacheComputeTupleHashValue(CatCache *cache, int nkeys, HeapTuple tuple)
388 : {
389 7400912 : Datum v1 = 0,
390 7400912 : v2 = 0,
391 7400912 : v3 = 0,
392 7400912 : v4 = 0;
393 7400912 : bool isNull = false;
394 7400912 : int *cc_keyno = cache->cc_keyno;
395 7400912 : TupleDesc cc_tupdesc = cache->cc_tupdesc;
396 :
397 : /* Now extract key fields from tuple, insert into scankey */
398 7400912 : switch (nkeys)
399 : {
400 464804 : case 4:
401 464804 : v4 = fastgetattr(tuple,
402 464804 : cc_keyno[3],
403 : cc_tupdesc,
404 : &isNull);
405 : Assert(!isNull);
406 : /* FALLTHROUGH */
407 1317346 : case 3:
408 1317346 : v3 = fastgetattr(tuple,
409 1317346 : cc_keyno[2],
410 : cc_tupdesc,
411 : &isNull);
412 : Assert(!isNull);
413 : /* FALLTHROUGH */
414 5353834 : case 2:
415 5353834 : v2 = fastgetattr(tuple,
416 5353834 : cc_keyno[1],
417 : cc_tupdesc,
418 : &isNull);
419 : Assert(!isNull);
420 : /* FALLTHROUGH */
421 7400912 : case 1:
422 7400912 : v1 = fastgetattr(tuple,
423 : cc_keyno[0],
424 : cc_tupdesc,
425 : &isNull);
426 : Assert(!isNull);
427 7400912 : break;
428 0 : default:
429 0 : elog(FATAL, "wrong number of hash keys: %d", nkeys);
430 : break;
431 : }
432 :
433 7400912 : return CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
434 : }
435 :
436 : /*
437 : * CatalogCacheCompareTuple
438 : *
439 : * Compare a tuple to the passed arguments.
440 : */
441 : static inline bool
442 91708010 : CatalogCacheCompareTuple(const CatCache *cache, int nkeys,
443 : const Datum *cachekeys,
444 : const Datum *searchkeys)
445 : {
446 91708010 : const CCFastEqualFN *cc_fastequal = cache->cc_fastequal;
447 : int i;
448 :
449 213354502 : for (i = 0; i < nkeys; i++)
450 : {
451 121646492 : if (!(cc_fastequal[i]) (cachekeys[i], searchkeys[i]))
452 0 : return false;
453 : }
454 91708010 : return true;
455 : }
456 :
457 :
#ifdef CATCACHE_STATS

/*
 * CatCachePrintStats
 *
 * on_proc_exit callback: emit per-cache and aggregate catcache statistics
 * at DEBUG2.  'code' and 'arg' are the standard exit-callback parameters
 * and are unused here.
 */
static void
CatCachePrintStats(int code, Datum arg)
{
	slist_iter	iter;
	long		cc_searches = 0;
	long		cc_hits = 0;
	long		cc_neg_hits = 0;
	long		cc_newloads = 0;
	long		cc_invals = 0;
	long		cc_nlists = 0;
	long		cc_lsearches = 0;
	long		cc_lhits = 0;

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		if (cache->cc_ntup == 0 && cache->cc_searches == 0)
			continue;			/* don't print unused caches */
		elog(DEBUG2, "catcache %s/%u: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %d lists, %ld lsrch, %ld lhits",
			 cache->cc_relname,
			 cache->cc_indexoid,
			 cache->cc_ntup,
			 cache->cc_searches,
			 cache->cc_hits,
			 cache->cc_neg_hits,
			 cache->cc_hits + cache->cc_neg_hits,
			 cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits - cache->cc_newloads,
			 cache->cc_searches - cache->cc_hits - cache->cc_neg_hits,
			 cache->cc_invals,
			 cache->cc_nlist,
			 cache->cc_lsearches,
			 cache->cc_lhits);
		/* accumulate totals for the summary line below */
		cc_searches += cache->cc_searches;
		cc_hits += cache->cc_hits;
		cc_neg_hits += cache->cc_neg_hits;
		cc_newloads += cache->cc_newloads;
		cc_invals += cache->cc_invals;
		cc_nlists += cache->cc_nlist;
		cc_lsearches += cache->cc_lsearches;
		cc_lhits += cache->cc_lhits;
	}
	elog(DEBUG2, "catcache totals: %d tup, %ld srch, %ld+%ld=%ld hits, %ld+%ld=%ld loads, %ld invals, %ld lists, %ld lsrch, %ld lhits",
		 CacheHdr->ch_ntup,
		 cc_searches,
		 cc_hits,
		 cc_neg_hits,
		 cc_hits + cc_neg_hits,
		 cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits - cc_newloads,
		 cc_searches - cc_hits - cc_neg_hits,
		 cc_invals,
		 cc_nlists,
		 cc_lsearches,
		 cc_lhits);
}
#endif							/* CATCACHE_STATS */
518 :
519 :
520 : /*
521 : * CatCacheRemoveCTup
522 : *
523 : * Unlink and delete the given cache entry
524 : *
525 : * NB: if it is a member of a CatCList, the CatCList is deleted too.
526 : * Both the cache entry and the list had better have zero refcount.
527 : */
528 : static void
529 1580716 : CatCacheRemoveCTup(CatCache *cache, CatCTup *ct)
530 : {
531 : Assert(ct->refcount == 0);
532 : Assert(ct->my_cache == cache);
533 :
534 1580716 : if (ct->c_list)
535 : {
536 : /*
537 : * The cleanest way to handle this is to call CatCacheRemoveCList,
538 : * which will recurse back to me, and the recursive call will do the
539 : * work. Set the "dead" flag to make sure it does recurse.
540 : */
541 0 : ct->dead = true;
542 0 : CatCacheRemoveCList(cache, ct->c_list);
543 0 : return; /* nothing left to do */
544 : }
545 :
546 : /* delink from linked list */
547 1580716 : dlist_delete(&ct->cache_elem);
548 :
549 : /*
550 : * Free keys when we're dealing with a negative entry, normal entries just
551 : * point into tuple, allocated together with the CatCTup.
552 : */
553 1580716 : if (ct->negative)
554 454176 : CatCacheFreeKeys(cache->cc_tupdesc, cache->cc_nkeys,
555 454176 : cache->cc_keyno, ct->keys);
556 :
557 1580716 : pfree(ct);
558 :
559 1580716 : --cache->cc_ntup;
560 1580716 : --CacheHdr->ch_ntup;
561 : }
562 :
563 : /*
564 : * CatCacheRemoveCList
565 : *
566 : * Unlink and delete the given cache list entry
567 : *
568 : * NB: any dead member entries that become unreferenced are deleted too.
569 : */
570 : static void
571 129866 : CatCacheRemoveCList(CatCache *cache, CatCList *cl)
572 : {
573 : int i;
574 :
575 : Assert(cl->refcount == 0);
576 : Assert(cl->my_cache == cache);
577 :
578 : /* delink from member tuples */
579 432068 : for (i = cl->n_members; --i >= 0;)
580 : {
581 302202 : CatCTup *ct = cl->members[i];
582 :
583 : Assert(ct->c_list == cl);
584 302202 : ct->c_list = NULL;
585 : /* if the member is dead and now has no references, remove it */
586 302202 : if (
587 : #ifndef CATCACHE_FORCE_RELEASE
588 302202 : ct->dead &&
589 : #endif
590 144 : ct->refcount == 0)
591 144 : CatCacheRemoveCTup(cache, ct);
592 : }
593 :
594 : /* delink from linked list */
595 129866 : dlist_delete(&cl->cache_elem);
596 :
597 : /* free associated column data */
598 129866 : CatCacheFreeKeys(cache->cc_tupdesc, cl->nkeys,
599 129866 : cache->cc_keyno, cl->keys);
600 :
601 129866 : pfree(cl);
602 :
603 129866 : --cache->cc_nlist;
604 129866 : }
605 :
606 :
/*
 * CatCacheInvalidate
 *
 * Invalidate entries in the specified cache, given a hash value.
 *
 * We delete cache entries that match the hash value, whether positive
 * or negative.  We don't care whether the invalidation is the result
 * of a tuple insertion or a deletion.
 *
 * We used to try to match positive cache entries by TID, but that is
 * unsafe after a VACUUM FULL on a system catalog: an inval event could
 * be queued before VACUUM FULL, and then processed afterwards, when the
 * target tuple that has to be invalidated has a different TID than it
 * did when the event was created.  So now we just compare hash values and
 * accept the small risk of unnecessary invalidations due to false matches.
 *
 * This routine is only quasi-public: it should only be used by inval.c.
 */
void
CatCacheInvalidate(CatCache *cache, uint32 hashValue)
{
	Index		hashIndex;
	dlist_mutable_iter iter;

	CACHE_elog(DEBUG2, "CatCacheInvalidate: called");

	/*
	 * We don't bother to check whether the cache has finished initialization
	 * yet; if not, there will be no entries in it so no problem.
	 */

	/*
	 * Invalidate *all* CatCLists in this cache; it's too hard to tell which
	 * searches might still be correct, so just zap 'em all.
	 */
	for (int i = 0; i < cache->cc_nlbuckets; i++)
	{
		dlist_head *bucket = &cache->cc_lbucket[i];

		dlist_foreach_modify(iter, bucket)
		{
			CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

			/* pinned lists can't be freed now; mark dead for later removal */
			if (cl->refcount > 0)
				cl->dead = true;
			else
				CatCacheRemoveCList(cache, cl);
		}
	}

	/*
	 * inspect the proper hash bucket for tuple matches
	 */
	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
	dlist_foreach_modify(iter, &cache->cc_bucket[hashIndex])
	{
		CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

		if (hashValue == ct->hash_value)
		{
			if (ct->refcount > 0 ||
				(ct->c_list && ct->c_list->refcount > 0))
			{
				ct->dead = true;
				/* list, if any, was marked dead above */
				Assert(ct->c_list == NULL || ct->c_list->dead);
			}
			else
				CatCacheRemoveCTup(cache, ct);
			CACHE_elog(DEBUG2, "CatCacheInvalidate: invalidated");
#ifdef CATCACHE_STATS
			cache->cc_invals++;
#endif
			/* could be multiple matches, so keep looking! */
		}
	}

	/* Also invalidate any entries that are being built */
	for (CatCInProgress *e = catcache_in_progress_stack; e != NULL; e = e->next)
	{
		if (e->cache == cache)
		{
			/* in-progress lists are always zapped; tuples only on hash match */
			if (e->list || e->hash_value == hashValue)
				e->dead = true;
		}
	}
}
694 :
695 : /* ----------------------------------------------------------------
696 : * public functions
697 : * ----------------------------------------------------------------
698 : */
699 :
700 :
701 : /*
702 : * Standard routine for creating cache context if it doesn't exist yet
703 : *
704 : * There are a lot of places (probably far more than necessary) that check
705 : * whether CacheMemoryContext exists yet and want to create it if not.
706 : * We centralize knowledge of exactly how to create it here.
707 : */
708 : void
709 33936 : CreateCacheMemoryContext(void)
710 : {
711 : /*
712 : * Purely for paranoia, check that context doesn't exist; caller probably
713 : * did so already.
714 : */
715 33936 : if (!CacheMemoryContext)
716 33936 : CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
717 : "CacheMemoryContext",
718 : ALLOCSET_DEFAULT_SIZES);
719 33936 : }
720 :
721 :
/*
 * ResetCatalogCache
 *
 * Reset one catalog cache to empty.
 *
 * This is not very efficient if the target cache is nearly empty.
 * However, it shouldn't need to be efficient; we don't invoke it often.
 *
 * If 'debug_discard' is true, we are being called as part of
 * debug_discard_caches.  In that case, the cache is not reset for
 * correctness, but just to get more testing of cache invalidation.  We skip
 * resetting in-progress build entries in that case, or we'd never make any
 * progress.
 */
static void
ResetCatalogCache(CatCache *cache, bool debug_discard)
{
	dlist_mutable_iter iter;
	int			i;

	/* Remove each list in this cache, or at least mark it dead */
	for (i = 0; i < cache->cc_nlbuckets; i++)
	{
		dlist_head *bucket = &cache->cc_lbucket[i];

		dlist_foreach_modify(iter, bucket)
		{
			CatCList   *cl = dlist_container(CatCList, cache_elem, iter.cur);

			if (cl->refcount > 0)
				cl->dead = true;
			else
				CatCacheRemoveCList(cache, cl);
		}
	}

	/* Remove each tuple in this cache, or at least mark it dead */
	for (i = 0; i < cache->cc_nbuckets; i++)
	{
		dlist_head *bucket = &cache->cc_bucket[i];

		dlist_foreach_modify(iter, bucket)
		{
			CatCTup    *ct = dlist_container(CatCTup, cache_elem, iter.cur);

			/* referenced entries can only be marked; they're freed later */
			if (ct->refcount > 0 ||
				(ct->c_list && ct->c_list->refcount > 0))
			{
				ct->dead = true;
				/* list, if any, was marked dead above */
				Assert(ct->c_list == NULL || ct->c_list->dead);
			}
			else
				CatCacheRemoveCTup(cache, ct);
#ifdef CATCACHE_STATS
			cache->cc_invals++;
#endif
		}
	}

	/* Also invalidate any entries that are being built */
	if (!debug_discard)
	{
		for (CatCInProgress *e = catcache_in_progress_stack; e != NULL; e = e->next)
		{
			if (e->cache == cache)
				e->dead = true;
		}
	}
}
792 :
793 : /*
794 : * ResetCatalogCaches
795 : *
796 : * Reset all caches when a shared cache inval event forces it
797 : */
798 : void
799 0 : ResetCatalogCaches(void)
800 : {
801 0 : ResetCatalogCachesExt(false);
802 0 : }
803 :
804 : void
805 4106 : ResetCatalogCachesExt(bool debug_discard)
806 : {
807 : slist_iter iter;
808 :
809 : CACHE_elog(DEBUG2, "ResetCatalogCaches called");
810 :
811 353116 : slist_foreach(iter, &CacheHdr->ch_caches)
812 : {
813 349010 : CatCache *cache = slist_container(CatCache, cc_next, iter.cur);
814 :
815 349010 : ResetCatalogCache(cache, debug_discard);
816 : }
817 :
818 : CACHE_elog(DEBUG2, "end of ResetCatalogCaches call");
819 4106 : }
820 :
/*
 * CatalogCacheFlushCatalog
 *
 * Flush all catcache entries that came from the specified system catalog.
 * This is needed after VACUUM FULL/CLUSTER on the catalog, since the
 * tuples very likely now have different TIDs than before.  (At one point
 * we also tried to force re-execution of CatalogCacheInitializeCache for
 * the cache(s) on that catalog.  This is a bad idea since it leads to all
 * kinds of trouble if a cache flush occurs while loading cache entries.
 * We now avoid the need to do it by copying cc_tupdesc out of the relcache,
 * rather than relying on the relcache to keep a tupdesc for us.  Of course
 * this assumes the tupdesc of a cachable system table will not change...)
 */
void
CatalogCacheFlushCatalog(Oid catId)
{
	slist_iter	iter;

	CACHE_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);

	slist_foreach(iter, &CacheHdr->ch_caches)
	{
		CatCache   *cache = slist_container(CatCache, cc_next, iter.cur);

		/* Does this cache store tuples of the target catalog? */
		if (cache->cc_reloid == catId)
		{
			/* Yes, so flush all its contents */
			ResetCatalogCache(cache, false);

			/* Tell inval.c to call syscache callbacks for this cache */
			CallSyscacheCallbacks(cache->id, 0);
		}
	}

	CACHE_elog(DEBUG2, "end of CatalogCacheFlushCatalog call");
}
858 :
859 : /*
860 : * InitCatCache
861 : *
862 : * This allocates and initializes a cache for a system catalog relation.
863 : * Actually, the cache is only partially initialized to avoid opening the
864 : * relation. The relation will be opened and the rest of the cache
865 : * structure initialized on the first access.
866 : */
867 : #ifdef CACHEDEBUG
868 : #define InitCatCache_DEBUG2 \
869 : do { \
870 : elog(DEBUG2, "InitCatCache: rel=%u ind=%u id=%d nkeys=%d size=%d", \
871 : cp->cc_reloid, cp->cc_indexoid, cp->id, \
872 : cp->cc_nkeys, cp->cc_nbuckets); \
873 : } while(0)
874 : #else
875 : #define InitCatCache_DEBUG2
876 : #endif
877 :
878 : CatCache *
879 2884560 : InitCatCache(int id,
880 : Oid reloid,
881 : Oid indexoid,
882 : int nkeys,
883 : const int *key,
884 : int nbuckets)
885 : {
886 : CatCache *cp;
887 : MemoryContext oldcxt;
888 : int i;
889 :
890 : /*
891 : * nbuckets is the initial number of hash buckets to use in this catcache.
892 : * It will be enlarged later if it becomes too full.
893 : *
894 : * nbuckets must be a power of two. We check this via Assert rather than
895 : * a full runtime check because the values will be coming from constant
896 : * tables.
897 : *
898 : * If you're confused by the power-of-two check, see comments in
899 : * bitmapset.c for an explanation.
900 : */
901 : Assert(nbuckets > 0 && (nbuckets & -nbuckets) == nbuckets);
902 :
903 : /*
904 : * first switch to the cache context so our allocations do not vanish at
905 : * the end of a transaction
906 : */
907 2884560 : if (!CacheMemoryContext)
908 0 : CreateCacheMemoryContext();
909 :
910 2884560 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
911 :
912 : /*
913 : * if first time through, initialize the cache group header
914 : */
915 2884560 : if (CacheHdr == NULL)
916 : {
917 33936 : CacheHdr = (CatCacheHeader *) palloc(sizeof(CatCacheHeader));
918 33936 : slist_init(&CacheHdr->ch_caches);
919 33936 : CacheHdr->ch_ntup = 0;
920 : #ifdef CATCACHE_STATS
921 : /* set up to dump stats at backend exit */
922 : on_proc_exit(CatCachePrintStats, 0);
923 : #endif
924 : }
925 :
926 : /*
927 : * Allocate a new cache structure, aligning to a cacheline boundary
928 : *
929 : * Note: we rely on zeroing to initialize all the dlist headers correctly
930 : */
931 2884560 : cp = (CatCache *) palloc_aligned(sizeof(CatCache), PG_CACHE_LINE_SIZE,
932 : MCXT_ALLOC_ZERO);
933 2884560 : cp->cc_bucket = palloc0(nbuckets * sizeof(dlist_head));
934 :
935 : /*
936 : * Many catcaches never receive any list searches. Therefore, we don't
937 : * allocate the cc_lbuckets till we get a list search.
938 : */
939 2884560 : cp->cc_lbucket = NULL;
940 :
941 : /*
942 : * initialize the cache's relation information for the relation
943 : * corresponding to this cache, and initialize some of the new cache's
944 : * other internal fields. But don't open the relation yet.
945 : */
946 2884560 : cp->id = id;
947 2884560 : cp->cc_relname = "(not known yet)";
948 2884560 : cp->cc_reloid = reloid;
949 2884560 : cp->cc_indexoid = indexoid;
950 2884560 : cp->cc_relisshared = false; /* temporary */
951 2884560 : cp->cc_tupdesc = (TupleDesc) NULL;
952 2884560 : cp->cc_ntup = 0;
953 2884560 : cp->cc_nlist = 0;
954 2884560 : cp->cc_nbuckets = nbuckets;
955 2884560 : cp->cc_nlbuckets = 0;
956 2884560 : cp->cc_nkeys = nkeys;
957 7533792 : for (i = 0; i < nkeys; ++i)
958 : {
959 : Assert(AttributeNumberIsValid(key[i]));
960 4649232 : cp->cc_keyno[i] = key[i];
961 : }
962 :
963 : /*
964 : * new cache is initialized as far as we can go for now. print some
965 : * debugging information, if appropriate.
966 : */
967 : InitCatCache_DEBUG2;
968 :
969 : /*
970 : * add completed cache to top of group header's list
971 : */
972 2884560 : slist_push_head(&CacheHdr->ch_caches, &cp->cc_next);
973 :
974 : /*
975 : * back to the old context before we return...
976 : */
977 2884560 : MemoryContextSwitchTo(oldcxt);
978 :
979 2884560 : return cp;
980 : }
981 :
982 : /*
983 : * Enlarge a catcache, doubling the number of buckets.
984 : */
985 : static void
986 6402 : RehashCatCache(CatCache *cp)
987 : {
988 : dlist_head *newbucket;
989 : int newnbuckets;
990 : int i;
991 :
992 6402 : elog(DEBUG1, "rehashing catalog cache id %d for %s; %d tups, %d buckets",
993 : cp->id, cp->cc_relname, cp->cc_ntup, cp->cc_nbuckets);
994 :
995 : /* Allocate a new, larger, hash table. */
996 6402 : newnbuckets = cp->cc_nbuckets * 2;
997 6402 : newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
998 :
999 : /* Move all entries from old hash table to new. */
1000 573950 : for (i = 0; i < cp->cc_nbuckets; i++)
1001 : {
1002 : dlist_mutable_iter iter;
1003 :
1004 1709046 : dlist_foreach_modify(iter, &cp->cc_bucket[i])
1005 : {
1006 1141498 : CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
1007 1141498 : int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
1008 :
1009 1141498 : dlist_delete(iter.cur);
1010 1141498 : dlist_push_head(&newbucket[hashIndex], &ct->cache_elem);
1011 : }
1012 : }
1013 :
1014 : /* Switch to the new array. */
1015 6402 : pfree(cp->cc_bucket);
1016 6402 : cp->cc_nbuckets = newnbuckets;
1017 6402 : cp->cc_bucket = newbucket;
1018 6402 : }
1019 :
1020 : /*
1021 : * Enlarge a catcache's list storage, doubling the number of buckets.
1022 : */
1023 : static void
1024 1220 : RehashCatCacheLists(CatCache *cp)
1025 : {
1026 : dlist_head *newbucket;
1027 : int newnbuckets;
1028 : int i;
1029 :
1030 1220 : elog(DEBUG1, "rehashing catalog cache id %d for %s; %d lists, %d buckets",
1031 : cp->id, cp->cc_relname, cp->cc_nlist, cp->cc_nlbuckets);
1032 :
1033 : /* Allocate a new, larger, hash table. */
1034 1220 : newnbuckets = cp->cc_nlbuckets * 2;
1035 1220 : newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
1036 :
1037 : /* Move all entries from old hash table to new. */
1038 45732 : for (i = 0; i < cp->cc_nlbuckets; i++)
1039 : {
1040 : dlist_mutable_iter iter;
1041 :
1042 134756 : dlist_foreach_modify(iter, &cp->cc_lbucket[i])
1043 : {
1044 90244 : CatCList *cl = dlist_container(CatCList, cache_elem, iter.cur);
1045 90244 : int hashIndex = HASH_INDEX(cl->hash_value, newnbuckets);
1046 :
1047 90244 : dlist_delete(iter.cur);
1048 90244 : dlist_push_head(&newbucket[hashIndex], &cl->cache_elem);
1049 : }
1050 : }
1051 :
1052 : /* Switch to the new array. */
1053 1220 : pfree(cp->cc_lbucket);
1054 1220 : cp->cc_nlbuckets = newnbuckets;
1055 1220 : cp->cc_lbucket = newbucket;
1056 1220 : }
1057 :
/*
 * ConditionalCatalogCacheInitializeCache
 *
 * Call CatalogCacheInitializeCache() if not yet done.
 *
 * This is inlined into every lookup path; in the common case it costs only
 * a single test of cc_tupdesc, which CatalogCacheInitializeCache() sets
 * last as its "fully initialized" marker.
 */
pg_attribute_always_inline
static void
ConditionalCatalogCacheInitializeCache(CatCache *cache)
{
#ifdef USE_ASSERT_CHECKING
	/*
	 * TypeCacheRelCallback() runs outside transactions and relies on TYPEOID
	 * for hashing.  This isn't ideal.  Since lookup_type_cache() both
	 * registers the callback and searches TYPEOID, reaching trouble likely
	 * requires OOM at an unlucky moment.
	 *
	 * InvalidateAttoptCacheCallback() runs outside transactions and likewise
	 * relies on ATTNUM.  InitPostgres() initializes ATTNUM, so it's reliable.
	 */
	if (!(cache->id == TYPEOID || cache->id == ATTNUM) ||
		IsTransactionState())
		AssertCouldGetRelation();
	else
		Assert(cache->cc_tupdesc != NULL);
#endif

	/* NULL tupdesc means first use of this cache: finish initialization */
	if (unlikely(cache->cc_tupdesc == NULL))
		CatalogCacheInitializeCache(cache);
}
1087 :
/*
 * CatalogCacheInitializeCache
 *
 * This function does final initialization of a catcache: obtain the tuple
 * descriptor and set up the hash and equality function links.
 */
#ifdef CACHEDEBUG
/* Trace entry into CatalogCacheInitializeCache (CACHEDEBUG builds only) */
#define CatalogCacheInitializeCache_DEBUG1 \
	elog(DEBUG2, "CatalogCacheInitializeCache: cache @%p rel=%u", cache, \
		 cache->cc_reloid)

/* Trace setup of each key column; relies on locals "i" and "tupdesc" */
#define CatalogCacheInitializeCache_DEBUG2 \
do { \
		if (cache->cc_keyno[i] > 0) { \
			elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
				i+1, cache->cc_nkeys, cache->cc_keyno[i], \
				TupleDescAttr(tupdesc, cache->cc_keyno[i] - 1)->atttypid); \
		} else { \
			elog(DEBUG2, "CatalogCacheInitializeCache: load %d/%d w/%d", \
				i+1, cache->cc_nkeys, cache->cc_keyno[i]); \
		} \
} while(0)
#else
/* Normal builds: the tracing macros expand to nothing */
#define CatalogCacheInitializeCache_DEBUG1
#define CatalogCacheInitializeCache_DEBUG2
#endif
1114 :
static void
CatalogCacheInitializeCache(CatCache *cache)
{
	Relation	relation;
	MemoryContext oldcxt;
	TupleDesc	tupdesc;
	int			i;

	CatalogCacheInitializeCache_DEBUG1;

	/* Open (and lock) the catalog so we can read its descriptor */
	relation = table_open(cache->cc_reloid, AccessShareLock);

	/*
	 * switch to the cache context so our allocations do not vanish at the end
	 * of a transaction
	 */
	Assert(CacheMemoryContext != NULL);

	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/*
	 * copy the relcache's tuple descriptor to permanent cache storage
	 */
	tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));

	/*
	 * save the relation's name and relisshared flag, too (cc_relname is used
	 * only for debugging purposes)
	 */
	cache->cc_relname = pstrdup(RelationGetRelationName(relation));
	cache->cc_relisshared = RelationGetForm(relation)->relisshared;

	/*
	 * return to the caller's memory context and close the rel
	 */
	MemoryContextSwitchTo(oldcxt);

	table_close(relation, AccessShareLock);

	CACHE_elog(DEBUG2, "CatalogCacheInitializeCache: %s, %d keys",
			   cache->cc_relname, cache->cc_nkeys);

	/*
	 * initialize cache's key information: per-key hash/equality functions
	 * and a scan key template used for catalog scans on cache misses
	 */
	for (i = 0; i < cache->cc_nkeys; ++i)
	{
		Oid			keytype;
		RegProcedure eqfunc;

		CatalogCacheInitializeCache_DEBUG2;

		if (cache->cc_keyno[i] > 0)
		{
			Form_pg_attribute attr = TupleDescAttr(tupdesc,
												   cache->cc_keyno[i] - 1);

			keytype = attr->atttypid;
			/* cache key columns should always be NOT NULL */
			Assert(attr->attnotnull);
		}
		else
		{
			/* negative attnums (system columns) are rejected outright */
			if (cache->cc_keyno[i] < 0)
				elog(FATAL, "sys attributes are not supported in caches");
			keytype = OIDOID;
		}

		GetCCHashEqFuncs(keytype,
						 &cache->cc_hashfunc[i],
						 &eqfunc,
						 &cache->cc_fastequal[i]);

		/*
		 * Do equality-function lookup (we assume this won't need a catalog
		 * lookup for any supported type)
		 */
		fmgr_info_cxt(eqfunc,
					  &cache->cc_skey[i].sk_func,
					  CacheMemoryContext);

		/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
		cache->cc_skey[i].sk_attno = cache->cc_keyno[i];

		/* Fill in sk_strategy as well --- always standard equality */
		cache->cc_skey[i].sk_strategy = BTEqualStrategyNumber;
		cache->cc_skey[i].sk_subtype = InvalidOid;
		/* If a catcache key requires a collation, it must be C collation */
		cache->cc_skey[i].sk_collation = C_COLLATION_OID;

		CACHE_elog(DEBUG2, "CatalogCacheInitializeCache %s %d %p",
				   cache->cc_relname, i, cache);
	}

	/*
	 * mark this cache fully initialized; cc_tupdesc is assigned last because
	 * a NULL cc_tupdesc is what tells callers initialization is still needed
	 */
	cache->cc_tupdesc = tupdesc;
}
1214 :
/*
 * InitCatCachePhase2 -- external interface for CatalogCacheInitializeCache
 *
 * One reason to call this routine is to ensure that the relcache has
 * created entries for all the catalogs and indexes referenced by catcaches.
 * Therefore, provide an option to open the index as well as fixing the
 * cache itself.  An exception is the indexes on pg_am, which we don't use
 * (cf. IndexScanOK).
 *
 * cache: the catcache to finish initializing
 * touch_index: if true, also open/validate the cache's supporting index
 */
void
InitCatCachePhase2(CatCache *cache, bool touch_index)
{
	ConditionalCatalogCacheInitializeCache(cache);

	if (touch_index &&
		cache->id != AMOID &&
		cache->id != AMNAME)
	{
		Relation	idesc;

		/*
		 * We must lock the underlying catalog before opening the index to
		 * avoid deadlock, since index_open could possibly result in reading
		 * this same catalog, and if anyone else is exclusive-locking this
		 * catalog and index they'll be doing it in that order.
		 */
		LockRelationOid(cache->cc_reloid, AccessShareLock);
		idesc = index_open(cache->cc_indexoid, AccessShareLock);

		/*
		 * While we've got the index open, let's check that it's unique (and
		 * not just deferrable-unique, thank you very much).  This is just to
		 * catch thinkos in definitions of new catcaches, so we don't worry
		 * about the pg_am indexes not getting tested.
		 */
		Assert(idesc->rd_index->indisunique &&
			   idesc->rd_index->indimmediate);

		index_close(idesc, AccessShareLock);
		UnlockRelationOid(cache->cc_reloid, AccessShareLock);
	}
}
1257 :
1258 :
1259 : /*
1260 : * IndexScanOK
1261 : *
1262 : * This function checks for tuples that will be fetched by
1263 : * IndexSupportInitialize() during relcache initialization for
1264 : * certain system indexes that support critical syscaches.
1265 : * We can't use an indexscan to fetch these, else we'll get into
1266 : * infinite recursion. A plain heap scan will work, however.
1267 : * Once we have completed relcache initialization (signaled by
1268 : * criticalRelcachesBuilt), we don't have to worry anymore.
1269 : *
1270 : * Similarly, during backend startup we have to be able to use the
1271 : * pg_authid, pg_auth_members and pg_database syscaches for
1272 : * authentication even if we don't yet have relcache entries for those
1273 : * catalogs' indexes.
1274 : */
1275 : static bool
1276 6943022 : IndexScanOK(CatCache *cache)
1277 : {
1278 6943022 : switch (cache->id)
1279 : {
1280 742368 : case INDEXRELID:
1281 :
1282 : /*
1283 : * Rather than tracking exactly which indexes have to be loaded
1284 : * before we can use indexscans (which changes from time to time),
1285 : * just force all pg_index searches to be heap scans until we've
1286 : * built the critical relcaches.
1287 : */
1288 742368 : if (!criticalRelcachesBuilt)
1289 39896 : return false;
1290 702472 : break;
1291 :
1292 60960 : case AMOID:
1293 : case AMNAME:
1294 :
1295 : /*
1296 : * Always do heap scans in pg_am, because it's so small there's
1297 : * not much point in an indexscan anyway. We *must* do this when
1298 : * initially building critical relcache entries, but we might as
1299 : * well just always do it.
1300 : */
1301 60960 : return false;
1302 :
1303 109598 : case AUTHNAME:
1304 : case AUTHOID:
1305 : case AUTHMEMMEMROLE:
1306 : case DATABASEOID:
1307 :
1308 : /*
1309 : * Protect authentication lookups occurring before relcache has
1310 : * collected entries for shared indexes.
1311 : */
1312 109598 : if (!criticalSharedRelcachesBuilt)
1313 5238 : return false;
1314 104360 : break;
1315 :
1316 6030096 : default:
1317 6030096 : break;
1318 : }
1319 :
1320 : /* Normal case, allow index scan */
1321 6836928 : return true;
1322 : }
1323 :
/*
 * SearchCatCache
 *
 * This call searches a system cache for a tuple, opening the relation
 * if necessary (on the first access to a particular cache).
 *
 * The result is NULL if not found, or a pointer to a HeapTuple in
 * the cache.  The caller must not modify the tuple, and must call
 * ReleaseCatCache() when done with it.
 *
 * The search key values should be expressed as Datums of the key columns'
 * datatype(s).  (Pass zeroes for any unused parameters.)  As a special
 * exception, the passed-in key for a NAME column can be just a C string;
 * the caller need not go to the trouble of converting it to a fully
 * null-padded NAME.
 */
HeapTuple
SearchCatCache(CatCache *cache,
			   Datum v1,
			   Datum v2,
			   Datum v3,
			   Datum v4)
{
	/* Generic entry point: key count comes from the cache itself */
	return SearchCatCacheInternal(cache, cache->cc_nkeys, v1, v2, v3, v4);
}
1349 :
1350 :
1351 : /*
1352 : * SearchCatCacheN() are SearchCatCache() versions for a specific number of
1353 : * arguments. The compiler can inline the body and unroll loops, making them a
1354 : * bit faster than SearchCatCache().
1355 : */
1356 :
/* One-key variant; same contract as SearchCatCache() */
HeapTuple
SearchCatCache1(CatCache *cache,
				Datum v1)
{
	return SearchCatCacheInternal(cache, 1, v1, 0, 0, 0);
}
1363 :
1364 :
/* Two-key variant; same contract as SearchCatCache() */
HeapTuple
SearchCatCache2(CatCache *cache,
				Datum v1, Datum v2)
{
	return SearchCatCacheInternal(cache, 2, v1, v2, 0, 0);
}
1371 :
1372 :
/* Three-key variant; same contract as SearchCatCache() */
HeapTuple
SearchCatCache3(CatCache *cache,
				Datum v1, Datum v2, Datum v3)
{
	return SearchCatCacheInternal(cache, 3, v1, v2, v3, 0);
}
1379 :
1380 :
/* Four-key variant; same contract as SearchCatCache() */
HeapTuple
SearchCatCache4(CatCache *cache,
				Datum v1, Datum v2, Datum v3, Datum v4)
{
	return SearchCatCacheInternal(cache, 4, v1, v2, v3, v4);
}
1387 :
/*
 * Work-horse for SearchCatCache/SearchCatCacheN.
 *
 * nkeys must equal cache->cc_nkeys; passing it separately lets the
 * SearchCatCacheN() wrappers supply a compile-time constant.
 */
static inline HeapTuple
SearchCatCacheInternal(CatCache *cache,
					   int nkeys,
					   Datum v1,
					   Datum v2,
					   Datum v3,
					   Datum v4)
{
	Datum		arguments[CATCACHE_MAXKEYS];
	uint32		hashValue;
	Index		hashIndex;
	dlist_iter	iter;
	dlist_head *bucket;
	CatCTup    *ct;

	Assert(cache->cc_nkeys == nkeys);

	/*
	 * one-time startup overhead for each cache
	 */
	ConditionalCatalogCacheInitializeCache(cache);

#ifdef CATCACHE_STATS
	cache->cc_searches++;
#endif

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * find the hash bucket in which to look for the tuple
	 */
	hashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
	hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);

	/*
	 * scan the hash bucket until we find a match or exhaust our tuples
	 *
	 * Note: it's okay to use dlist_foreach here, even though we modify the
	 * dlist within the loop, because we don't continue the loop afterwards.
	 */
	bucket = &cache->cc_bucket[hashIndex];
	dlist_foreach(iter, bucket)
	{
		ct = dlist_container(CatCTup, cache_elem, iter.cur);

		if (ct->dead)
			continue;			/* ignore dead entries */

		if (ct->hash_value != hashValue)
			continue;			/* quickly skip entry if wrong hash val */

		if (!CatalogCacheCompareTuple(cache, nkeys, ct->keys, arguments))
			continue;

		/*
		 * We found a match in the cache.  Move it to the front of the list
		 * for its hashbucket, in order to speed subsequent searches.  (The
		 * most frequently accessed elements in any hashbucket will tend to be
		 * near the front of the hashbucket's list.)
		 */
		dlist_move_head(bucket, &ct->cache_elem);

		/*
		 * If it's a positive entry, bump its refcount and return it.  If it's
		 * negative, we can report failure to the caller.
		 */
		if (!ct->negative)
		{
			/* Enlarge resowner array before refcount bump so we can't fail
			 * between incrementing and remembering the reference */
			ResourceOwnerEnlarge(CurrentResourceOwner);
			ct->refcount++;
			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);

			CACHE_elog(DEBUG2, "SearchCatCache(%s): found in bucket %d",
					   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_hits++;
#endif

			return &ct->tuple;
		}
		else
		{
			CACHE_elog(DEBUG2, "SearchCatCache(%s): found neg entry in bucket %d",
					   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
			cache->cc_neg_hits++;
#endif

			return NULL;
		}
	}

	/* Not found in cache: take the out-of-line slow path to the catalogs */
	return SearchCatCacheMiss(cache, nkeys, hashValue, hashIndex, v1, v2, v3, v4);
}
1491 :
/*
 * Search the actual catalogs, rather than the cache.
 *
 * This is kept separate from SearchCatCacheInternal() to keep the fast-path
 * as small as possible.  To avoid that effort being undone by a helpful
 * compiler, try to explicitly forbid inlining.
 *
 * On a hit, returns the new entry's tuple with its refcount already bumped;
 * on a miss, installs a negative entry (except in bootstrap mode) and
 * returns NULL.
 */
static pg_noinline HeapTuple
SearchCatCacheMiss(CatCache *cache,
				   int nkeys,
				   uint32 hashValue,
				   Index hashIndex,
				   Datum v1,
				   Datum v2,
				   Datum v3,
				   Datum v4)
{
	ScanKeyData cur_skey[CATCACHE_MAXKEYS];
	Relation	relation;
	SysScanDesc scandesc;
	HeapTuple	ntp;
	CatCTup    *ct;
	bool		stale;
	Datum		arguments[CATCACHE_MAXKEYS];

	/* Initialize local parameter array */
	arguments[0] = v1;
	arguments[1] = v2;
	arguments[2] = v3;
	arguments[3] = v4;

	/*
	 * Tuple was not found in cache, so we have to try to retrieve it directly
	 * from the relation.  If found, we will add it to the cache; if not
	 * found, we will add a negative cache entry instead.
	 *
	 * NOTE: it is possible for recursive cache lookups to occur while reading
	 * the relation --- for example, due to shared-cache-inval messages being
	 * processed during table_open().  This is OK.  It's even possible for one
	 * of those lookups to find and enter the very same tuple we are trying to
	 * fetch here.  If that happens, we will enter a second copy of the tuple
	 * into the cache.  The first copy will never be referenced again, and
	 * will eventually age out of the cache, so there's no functional problem.
	 * This case is rare enough that it's not worth expending extra cycles to
	 * detect.
	 *
	 * Another case, which we *must* handle, is that the tuple could become
	 * outdated during CatalogCacheCreateEntry's attempt to detoast it (since
	 * AcceptInvalidationMessages can run during TOAST table access).  We do
	 * not want to return already-stale catcache entries, so we loop around
	 * and do the table scan again if that happens.
	 */
	relation = table_open(cache->cc_reloid, AccessShareLock);

	/*
	 * Ok, need to make a lookup in the relation, copy the scankey and fill
	 * out any per-call fields.  (The unused trailing sk_argument slots are
	 * harmlessly set; cur_skey always has CATCACHE_MAXKEYS elements.)
	 */
	memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * nkeys);
	cur_skey[0].sk_argument = v1;
	cur_skey[1].sk_argument = v2;
	cur_skey[2].sk_argument = v3;
	cur_skey[3].sk_argument = v4;

	do
	{
		scandesc = systable_beginscan(relation,
									  cache->cc_indexoid,
									  IndexScanOK(cache),
									  NULL,
									  nkeys,
									  cur_skey);

		ct = NULL;
		stale = false;

		while (HeapTupleIsValid(ntp = systable_getnext(scandesc)))
		{
			ct = CatalogCacheCreateEntry(cache, ntp, NULL,
										 hashValue, hashIndex);
			/* upon failure, we must start the scan over */
			if (ct == NULL)
			{
				stale = true;
				break;
			}
			/* immediately set the refcount to 1 */
			ResourceOwnerEnlarge(CurrentResourceOwner);
			ct->refcount++;
			ResourceOwnerRememberCatCacheRef(CurrentResourceOwner, &ct->tuple);
			break;				/* assume only one match */
		}

		systable_endscan(scandesc);
	} while (stale);

	table_close(relation, AccessShareLock);

	/*
	 * If tuple was not found, we need to build a negative cache entry
	 * containing a fake tuple.  The fake tuple has the correct key columns,
	 * but nulls everywhere else.
	 *
	 * In bootstrap mode, we don't build negative entries, because the cache
	 * invalidation mechanism isn't alive and can't clear them if the tuple
	 * gets created later.  (Bootstrap doesn't do UPDATEs, so it doesn't need
	 * cache inval for that.)
	 */
	if (ct == NULL)
	{
		if (IsBootstrapProcessingMode())
			return NULL;

		ct = CatalogCacheCreateEntry(cache, NULL, arguments,
									 hashValue, hashIndex);

		/* Creating a negative cache entry shouldn't fail */
		Assert(ct != NULL);

		CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
				   cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
		CACHE_elog(DEBUG2, "SearchCatCache(%s): put neg entry in bucket %d",
				   cache->cc_relname, hashIndex);

		/*
		 * We are not returning the negative entry to the caller, so leave its
		 * refcount zero.
		 */

		return NULL;
	}

	CACHE_elog(DEBUG2, "SearchCatCache(%s): Contains %d/%d tuples",
			   cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
	CACHE_elog(DEBUG2, "SearchCatCache(%s): put in bucket %d",
			   cache->cc_relname, hashIndex);

#ifdef CATCACHE_STATS
	cache->cc_newloads++;
#endif

	return &ct->tuple;
}
1635 :
/*
 * ReleaseCatCache
 *
 * Decrement the reference count of a catcache entry (releasing the
 * hold grabbed by a successful SearchCatCache).
 *
 * NOTE: if compiled with -DCATCACHE_FORCE_RELEASE then catcache entries
 * will be freed as soon as their refcount goes to zero.  In combination
 * with aset.c's CLOBBER_FREED_MEMORY option, this provides a good test
 * to catch references to already-released catcache entries.
 */
void
ReleaseCatCache(HeapTuple tuple)
{
	/* Release on behalf of the current resource owner */
	ReleaseCatCacheWithOwner(tuple, CurrentResourceOwner);
}
1652 :
1653 : static void
1654 88818196 : ReleaseCatCacheWithOwner(HeapTuple tuple, ResourceOwner resowner)
1655 : {
1656 88818196 : CatCTup *ct = (CatCTup *) (((char *) tuple) -
1657 : offsetof(CatCTup, tuple));
1658 :
1659 : /* Safety checks to ensure we were handed a cache entry */
1660 : Assert(ct->ct_magic == CT_MAGIC);
1661 : Assert(ct->refcount > 0);
1662 :
1663 88818196 : ct->refcount--;
1664 88818196 : if (resowner)
1665 88807426 : ResourceOwnerForgetCatCacheRef(CurrentResourceOwner, &ct->tuple);
1666 :
1667 88818196 : if (
1668 : #ifndef CATCACHE_FORCE_RELEASE
1669 88818196 : ct->dead &&
1670 : #endif
1671 1558 : ct->refcount == 0 &&
1672 1442 : (ct->c_list == NULL || ct->c_list->refcount == 0))
1673 1442 : CatCacheRemoveCTup(ct->my_cache, ct);
1674 88818196 : }
1675 :
1676 :
1677 : /*
1678 : * GetCatCacheHashValue
1679 : *
1680 : * Compute the hash value for a given set of search keys.
1681 : *
1682 : * The reason for exposing this as part of the API is that the hash value is
1683 : * exposed in cache invalidation operations, so there are places outside the
1684 : * catcache code that need to be able to compute the hash values.
1685 : */
1686 : uint32
1687 1095620 : GetCatCacheHashValue(CatCache *cache,
1688 : Datum v1,
1689 : Datum v2,
1690 : Datum v3,
1691 : Datum v4)
1692 : {
1693 : /*
1694 : * one-time startup overhead for each cache
1695 : */
1696 1095620 : ConditionalCatalogCacheInitializeCache(cache);
1697 :
1698 : /*
1699 : * calculate the hash value
1700 : */
1701 1095620 : return CatalogCacheComputeHashValue(cache, cache->cc_nkeys, v1, v2, v3, v4);
1702 : }
1703 :
1704 :
1705 : /*
1706 : * SearchCatCacheList
1707 : *
1708 : * Generate a list of all tuples matching a partial key (that is,
1709 : * a key specifying just the first K of the cache's N key columns).
1710 : *
1711 : * It doesn't make any sense to specify all of the cache's key columns
1712 : * here: since the key is unique, there could be at most one match, so
1713 : * you ought to use SearchCatCache() instead. Hence this function takes
1714 : * one fewer Datum argument than SearchCatCache() does.
1715 : *
1716 : * The caller must not modify the list object or the pointed-to tuples,
1717 : * and must call ReleaseCatCacheList() when done with the list.
1718 : */
1719 : CatCList *
1720 3837364 : SearchCatCacheList(CatCache *cache,
1721 : int nkeys,
1722 : Datum v1,
1723 : Datum v2,
1724 : Datum v3)
1725 : {
1726 3837364 : Datum v4 = 0; /* dummy last-column value */
1727 : Datum arguments[CATCACHE_MAXKEYS];
1728 : uint32 lHashValue;
1729 : Index lHashIndex;
1730 : dlist_iter iter;
1731 : dlist_head *lbucket;
1732 : CatCList *cl;
1733 : CatCTup *ct;
1734 : List *volatile ctlist;
1735 : ListCell *ctlist_item;
1736 : int nmembers;
1737 : bool ordered;
1738 : HeapTuple ntp;
1739 : MemoryContext oldcxt;
1740 : int i;
1741 : CatCInProgress *save_in_progress;
1742 : CatCInProgress in_progress_ent;
1743 :
1744 : /*
1745 : * one-time startup overhead for each cache
1746 : */
1747 3837364 : ConditionalCatalogCacheInitializeCache(cache);
1748 :
1749 : Assert(nkeys > 0 && nkeys < cache->cc_nkeys);
1750 :
1751 : #ifdef CATCACHE_STATS
1752 : cache->cc_lsearches++;
1753 : #endif
1754 :
1755 : /* Initialize local parameter array */
1756 3837364 : arguments[0] = v1;
1757 3837364 : arguments[1] = v2;
1758 3837364 : arguments[2] = v3;
1759 3837364 : arguments[3] = v4;
1760 :
1761 : /*
1762 : * If we haven't previously done a list search in this cache, create the
1763 : * bucket header array; otherwise, consider whether it's time to enlarge
1764 : * it.
1765 : */
1766 3837364 : if (cache->cc_lbucket == NULL)
1767 : {
1768 : /* Arbitrary initial size --- must be a power of 2 */
1769 40424 : int nbuckets = 16;
1770 :
1771 40424 : cache->cc_lbucket = (dlist_head *)
1772 40424 : MemoryContextAllocZero(CacheMemoryContext,
1773 : nbuckets * sizeof(dlist_head));
1774 : /* Don't set cc_nlbuckets if we get OOM allocating cc_lbucket */
1775 40424 : cache->cc_nlbuckets = nbuckets;
1776 : }
1777 : else
1778 : {
1779 : /*
1780 : * If the hash table has become too full, enlarge the buckets array.
1781 : * Quite arbitrarily, we enlarge when fill factor > 2.
1782 : */
1783 3796940 : if (cache->cc_nlist > cache->cc_nlbuckets * 2)
1784 1220 : RehashCatCacheLists(cache);
1785 : }
1786 :
1787 : /*
1788 : * Find the hash bucket in which to look for the CatCList.
1789 : */
1790 3837364 : lHashValue = CatalogCacheComputeHashValue(cache, nkeys, v1, v2, v3, v4);
1791 3837364 : lHashIndex = HASH_INDEX(lHashValue, cache->cc_nlbuckets);
1792 :
1793 : /*
1794 : * scan the items until we find a match or exhaust our list
1795 : *
1796 : * Note: it's okay to use dlist_foreach here, even though we modify the
1797 : * dlist within the loop, because we don't continue the loop afterwards.
1798 : */
1799 3837364 : lbucket = &cache->cc_lbucket[lHashIndex];
1800 4223646 : dlist_foreach(iter, lbucket)
1801 : {
1802 3909148 : cl = dlist_container(CatCList, cache_elem, iter.cur);
1803 :
1804 3909148 : if (cl->dead)
1805 0 : continue; /* ignore dead entries */
1806 :
1807 3909148 : if (cl->hash_value != lHashValue)
1808 386282 : continue; /* quickly skip entry if wrong hash val */
1809 :
1810 : /*
1811 : * see if the cached list matches our key.
1812 : */
1813 3522866 : if (cl->nkeys != nkeys)
1814 0 : continue;
1815 :
1816 3522866 : if (!CatalogCacheCompareTuple(cache, nkeys, cl->keys, arguments))
1817 0 : continue;
1818 :
1819 : /*
1820 : * We found a matching list. Move the list to the front of the list
1821 : * for its hashbucket, so as to speed subsequent searches. (We do not
1822 : * move the members to the fronts of their hashbucket lists, however,
1823 : * since there's no point in that unless they are searched for
1824 : * individually.)
1825 : */
1826 3522866 : dlist_move_head(lbucket, &cl->cache_elem);
1827 :
1828 : /* Bump the list's refcount and return it */
1829 3522866 : ResourceOwnerEnlarge(CurrentResourceOwner);
1830 3522866 : cl->refcount++;
1831 3522866 : ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
1832 :
1833 : CACHE_elog(DEBUG2, "SearchCatCacheList(%s): found list",
1834 : cache->cc_relname);
1835 :
1836 : #ifdef CATCACHE_STATS
1837 : cache->cc_lhits++;
1838 : #endif
1839 :
1840 3522866 : return cl;
1841 : }
1842 :
1843 : /*
1844 : * List was not found in cache, so we have to build it by reading the
1845 : * relation. For each matching tuple found in the relation, use an
1846 : * existing cache entry if possible, else build a new one.
1847 : *
1848 : * We have to bump the member refcounts temporarily to ensure they won't
1849 : * get dropped from the cache while loading other members. We use a PG_TRY
1850 : * block to ensure we can undo those refcounts if we get an error before
1851 : * we finish constructing the CatCList. ctlist must be valid throughout
1852 : * the PG_TRY block.
1853 : */
1854 314498 : ctlist = NIL;
1855 :
1856 : /*
1857 : * Cache invalidation can happen while we're building the list.
1858 : * CatalogCacheCreateEntry() handles concurrent invalidation of individual
1859 : * tuples, but it's also possible that a new entry is concurrently added
1860 : * that should be part of the list we're building. Register an
1861 : * "in-progress" entry that will receive the invalidation, until we have
1862 : * built the final list entry.
1863 : */
1864 314498 : save_in_progress = catcache_in_progress_stack;
1865 314498 : in_progress_ent.next = catcache_in_progress_stack;
1866 314498 : in_progress_ent.cache = cache;
1867 314498 : in_progress_ent.hash_value = lHashValue;
1868 314498 : in_progress_ent.list = true;
1869 314498 : in_progress_ent.dead = false;
1870 314498 : catcache_in_progress_stack = &in_progress_ent;
1871 :
1872 314498 : PG_TRY();
1873 : {
1874 : ScanKeyData cur_skey[CATCACHE_MAXKEYS];
1875 : Relation relation;
1876 : SysScanDesc scandesc;
1877 314498 : bool first_iter = true;
1878 :
1879 314498 : relation = table_open(cache->cc_reloid, AccessShareLock);
1880 :
1881 : /*
1882 : * Ok, need to make a lookup in the relation, copy the scankey and
1883 : * fill out any per-call fields.
1884 : */
1885 314498 : memcpy(cur_skey, cache->cc_skey, sizeof(ScanKeyData) * cache->cc_nkeys);
1886 314498 : cur_skey[0].sk_argument = v1;
1887 314498 : cur_skey[1].sk_argument = v2;
1888 314498 : cur_skey[2].sk_argument = v3;
1889 314498 : cur_skey[3].sk_argument = v4;
1890 :
1891 : /*
1892 : * Scan the table for matching entries. If an invalidation arrives
1893 : * mid-build, we will loop back here to retry.
1894 : */
1895 : do
1896 : {
1897 : /*
1898 : * If we are retrying, release refcounts on any items created on
1899 : * the previous iteration. We dare not try to free them if
1900 : * they're now unreferenced, since an error while doing that would
1901 : * result in the PG_CATCH below doing extra refcount decrements.
1902 : * Besides, we'll likely re-adopt those items in the next
1903 : * iteration, so it's not worth complicating matters to try to get
1904 : * rid of them.
1905 : */
1906 314506 : foreach(ctlist_item, ctlist)
1907 : {
1908 0 : ct = (CatCTup *) lfirst(ctlist_item);
1909 : Assert(ct->c_list == NULL);
1910 : Assert(ct->refcount > 0);
1911 0 : ct->refcount--;
1912 : }
1913 : /* Reset ctlist in preparation for new try */
1914 314506 : ctlist = NIL;
1915 314506 : in_progress_ent.dead = false;
1916 :
1917 629012 : scandesc = systable_beginscan(relation,
1918 : cache->cc_indexoid,
1919 314506 : IndexScanOK(cache),
1920 : NULL,
1921 : nkeys,
1922 : cur_skey);
1923 :
1924 : /* The list will be ordered iff we are doing an index scan */
1925 314506 : ordered = (scandesc->irel != NULL);
1926 :
1927 : /* Injection point to help testing the recursive invalidation case */
1928 314506 : if (first_iter)
1929 : {
1930 314498 : INJECTION_POINT("catcache-list-miss-systable-scan-started", NULL);
1931 314498 : first_iter = false;
1932 : }
1933 :
1934 1291754 : while (HeapTupleIsValid(ntp = systable_getnext(scandesc)) &&
1935 977254 : !in_progress_ent.dead)
1936 : {
1937 : uint32 hashValue;
1938 : Index hashIndex;
1939 977248 : bool found = false;
1940 : dlist_head *bucket;
1941 :
1942 : /*
1943 : * See if there's an entry for this tuple already.
1944 : */
1945 977248 : ct = NULL;
1946 977248 : hashValue = CatalogCacheComputeTupleHashValue(cache, cache->cc_nkeys, ntp);
1947 977248 : hashIndex = HASH_INDEX(hashValue, cache->cc_nbuckets);
1948 :
1949 977248 : bucket = &cache->cc_bucket[hashIndex];
1950 1350732 : dlist_foreach(iter, bucket)
1951 : {
1952 519372 : ct = dlist_container(CatCTup, cache_elem, iter.cur);
1953 :
1954 519372 : if (ct->dead || ct->negative)
1955 972 : continue; /* ignore dead and negative entries */
1956 :
1957 518400 : if (ct->hash_value != hashValue)
1958 353660 : continue; /* quickly skip entry if wrong hash val */
1959 :
1960 164740 : if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
1961 0 : continue; /* not same tuple */
1962 :
1963 : /*
1964 : * Found a match, but can't use it if it belongs to
1965 : * another list already
1966 : */
1967 164740 : if (ct->c_list)
1968 18852 : continue;
1969 :
1970 145888 : found = true;
1971 145888 : break; /* A-OK */
1972 : }
1973 :
1974 977248 : if (!found)
1975 : {
1976 : /* We didn't find a usable entry, so make a new one */
1977 831360 : ct = CatalogCacheCreateEntry(cache, ntp, NULL,
1978 : hashValue, hashIndex);
1979 :
1980 : /* upon failure, we must start the scan over */
1981 831360 : if (ct == NULL)
1982 : {
1983 0 : in_progress_ent.dead = true;
1984 0 : break;
1985 : }
1986 : }
1987 :
1988 : /* Careful here: add entry to ctlist, then bump its refcount */
1989 : /* This way leaves state correct if lappend runs out of memory */
1990 977248 : ctlist = lappend(ctlist, ct);
1991 977248 : ct->refcount++;
1992 : }
1993 :
1994 314506 : systable_endscan(scandesc);
1995 314506 : } while (in_progress_ent.dead);
1996 :
1997 314498 : table_close(relation, AccessShareLock);
1998 :
1999 : /* Make sure the resource owner has room to remember this entry. */
2000 314498 : ResourceOwnerEnlarge(CurrentResourceOwner);
2001 :
2002 : /* Now we can build the CatCList entry. */
2003 314498 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2004 314498 : nmembers = list_length(ctlist);
2005 : cl = (CatCList *)
2006 314498 : palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
2007 :
2008 : /* Extract key values */
2009 314498 : CatCacheCopyKeys(cache->cc_tupdesc, nkeys, cache->cc_keyno,
2010 314498 : arguments, cl->keys);
2011 314498 : MemoryContextSwitchTo(oldcxt);
2012 :
2013 : /*
2014 : * We are now past the last thing that could trigger an elog before we
2015 : * have finished building the CatCList and remembering it in the
2016 : * resource owner. So it's OK to fall out of the PG_TRY, and indeed
2017 : * we'd better do so before we start marking the members as belonging
2018 : * to the list.
2019 : */
2020 : }
2021 0 : PG_CATCH();
2022 : {
2023 : Assert(catcache_in_progress_stack == &in_progress_ent);
2024 0 : catcache_in_progress_stack = save_in_progress;
2025 :
2026 0 : foreach(ctlist_item, ctlist)
2027 : {
2028 0 : ct = (CatCTup *) lfirst(ctlist_item);
2029 : Assert(ct->c_list == NULL);
2030 : Assert(ct->refcount > 0);
2031 0 : ct->refcount--;
2032 0 : if (
2033 : #ifndef CATCACHE_FORCE_RELEASE
2034 0 : ct->dead &&
2035 : #endif
2036 0 : ct->refcount == 0 &&
2037 0 : (ct->c_list == NULL || ct->c_list->refcount == 0))
2038 0 : CatCacheRemoveCTup(cache, ct);
2039 : }
2040 :
2041 0 : PG_RE_THROW();
2042 : }
2043 314498 : PG_END_TRY();
2044 : Assert(catcache_in_progress_stack == &in_progress_ent);
2045 314498 : catcache_in_progress_stack = save_in_progress;
2046 :
2047 314498 : cl->cl_magic = CL_MAGIC;
2048 314498 : cl->my_cache = cache;
2049 314498 : cl->refcount = 0; /* for the moment */
2050 314498 : cl->dead = false;
2051 314498 : cl->ordered = ordered;
2052 314498 : cl->nkeys = nkeys;
2053 314498 : cl->hash_value = lHashValue;
2054 314498 : cl->n_members = nmembers;
2055 :
2056 314498 : i = 0;
2057 1291746 : foreach(ctlist_item, ctlist)
2058 : {
2059 977248 : cl->members[i++] = ct = (CatCTup *) lfirst(ctlist_item);
2060 : Assert(ct->c_list == NULL);
2061 977248 : ct->c_list = cl;
2062 : /* release the temporary refcount on the member */
2063 : Assert(ct->refcount > 0);
2064 977248 : ct->refcount--;
2065 : /* mark list dead if any members already dead */
2066 977248 : if (ct->dead)
2067 0 : cl->dead = true;
2068 : }
2069 : Assert(i == nmembers);
2070 :
2071 : /*
2072 : * Add the CatCList to the appropriate bucket, and count it.
2073 : */
2074 314498 : dlist_push_head(lbucket, &cl->cache_elem);
2075 :
2076 314498 : cache->cc_nlist++;
2077 :
2078 : /* Finally, bump the list's refcount and return it */
2079 314498 : cl->refcount++;
2080 314498 : ResourceOwnerRememberCatCacheListRef(CurrentResourceOwner, cl);
2081 :
2082 : CACHE_elog(DEBUG2, "SearchCatCacheList(%s): made list of %d members",
2083 : cache->cc_relname, nmembers);
2084 :
2085 314498 : return cl;
2086 : }
2087 :
/*
 * ReleaseCatCacheList
 *
 * Decrement the reference count of a catcache list.
 *
 * Public wrapper: the reference being released is assumed to have been
 * remembered in CurrentResourceOwner (as SearchCatCacheList does).
 */
void
ReleaseCatCacheList(CatCList *list)
{
	ReleaseCatCacheListWithOwner(list, CurrentResourceOwner);
}
2098 :
2099 : static void
2100 3837364 : ReleaseCatCacheListWithOwner(CatCList *list, ResourceOwner resowner)
2101 : {
2102 : /* Safety checks to ensure we were handed a cache entry */
2103 : Assert(list->cl_magic == CL_MAGIC);
2104 : Assert(list->refcount > 0);
2105 3837364 : list->refcount--;
2106 3837364 : if (resowner)
2107 3837328 : ResourceOwnerForgetCatCacheListRef(CurrentResourceOwner, list);
2108 :
2109 3837364 : if (
2110 : #ifndef CATCACHE_FORCE_RELEASE
2111 3837364 : list->dead &&
2112 : #endif
2113 6 : list->refcount == 0)
2114 6 : CatCacheRemoveCList(list->my_cache, list);
2115 3837364 : }
2116 :
2117 :
2118 : /*
2119 : * CatalogCacheCreateEntry
2120 : * Create a new CatCTup entry, copying the given HeapTuple and other
2121 : * supplied data into it. The new entry initially has refcount 0.
2122 : *
2123 : * To create a normal cache entry, ntp must be the HeapTuple just fetched
2124 : * from scandesc, and "arguments" is not used. To create a negative cache
2125 : * entry, pass NULL for ntp; then "arguments" is the cache keys to use.
2126 : * In either case, hashValue/hashIndex are the hash values computed from
2127 : * the cache keys.
2128 : *
2129 : * Returns NULL if we attempt to detoast the tuple and observe that it
2130 : * became stale. (This cannot happen for a negative entry.) Caller must
2131 : * retry the tuple lookup in that case.
2132 : */
static CatCTup *
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
						uint32 hashValue, Index hashIndex)
{
	CatCTup    *ct;
	MemoryContext oldcxt;

	if (ntp)
	{
		/* Building a normal (positive) entry from a fetched tuple */
		int			i;
		HeapTuple	dtp = NULL;

		/*
		 * The invalidation of the in-progress entry essentially never happens
		 * during our regression tests, and there's no easy way to force it to
		 * fail for testing purposes. To ensure we have test coverage for the
		 * retry paths in our callers, make debug builds randomly fail about
		 * 0.1% of the times through this code path, even when there's no
		 * toasted fields.
		 */
#ifdef USE_ASSERT_CHECKING
		if (pg_prng_uint32(&pg_global_prng_state) <= (PG_UINT32_MAX / 1000))
			return NULL;
#endif

		/*
		 * If there are any out-of-line toasted fields in the tuple, expand
		 * them in-line.  This saves cycles during later use of the catcache
		 * entry, and also protects us against the possibility of the toast
		 * tuples being freed before we attempt to fetch them, in case of
		 * something using a slightly stale catcache entry.
		 */
		if (HeapTupleHasExternal(ntp))
		{
			CatCInProgress *save_in_progress;
			CatCInProgress in_progress_ent;

			/*
			 * The tuple could become stale while we are doing toast table
			 * access (since AcceptInvalidationMessages can run then). The
			 * invalidation will mark our in-progress entry as dead.  Push an
			 * in-progress stack entry so that can be detected.
			 */
			save_in_progress = catcache_in_progress_stack;
			in_progress_ent.next = catcache_in_progress_stack;
			in_progress_ent.cache = cache;
			in_progress_ent.hash_value = hashValue;
			in_progress_ent.list = false;
			in_progress_ent.dead = false;
			catcache_in_progress_stack = &in_progress_ent;

			PG_TRY();
			{
				dtp = toast_flatten_tuple(ntp, cache->cc_tupdesc);
			}
			PG_FINALLY();
			{
				/* Pop our stack entry whether or not an error was thrown */
				Assert(catcache_in_progress_stack == &in_progress_ent);
				catcache_in_progress_stack = save_in_progress;
			}
			PG_END_TRY();

			/* Invalidated while detoasting?  Caller must retry the lookup. */
			if (in_progress_ent.dead)
			{
				heap_freetuple(dtp);
				return NULL;
			}
		}
		else
			dtp = ntp;

		/* Allocate memory for CatCTup and the cached tuple in one go */
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

		ct = (CatCTup *) palloc(sizeof(CatCTup) +
								MAXIMUM_ALIGNOF + dtp->t_len);
		ct->tuple.t_len = dtp->t_len;
		ct->tuple.t_self = dtp->t_self;
		ct->tuple.t_tableOid = dtp->t_tableOid;
		/* tuple data lives just past the header, MAXALIGN'd */
		ct->tuple.t_data = (HeapTupleHeader)
			MAXALIGN(((char *) ct) + sizeof(CatCTup));
		/* copy tuple contents */
		memcpy((char *) ct->tuple.t_data,
			   (const char *) dtp->t_data,
			   dtp->t_len);
		MemoryContextSwitchTo(oldcxt);

		/* Free the flattened copy, if we made one above */
		if (dtp != ntp)
			heap_freetuple(dtp);

		/* extract keys - they'll point into the tuple if not by-value */
		for (i = 0; i < cache->cc_nkeys; i++)
		{
			Datum		atp;
			bool		isnull;

			atp = heap_getattr(&ct->tuple,
							   cache->cc_keyno[i],
							   cache->cc_tupdesc,
							   &isnull);
			Assert(!isnull);
			ct->keys[i] = atp;
		}
	}
	else
	{
		/* Set up keys for a negative cache entry */
		oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
		ct = (CatCTup *) palloc(sizeof(CatCTup));

		/*
		 * Store keys - they'll point into separately allocated memory if not
		 * by-value.
		 */
		CatCacheCopyKeys(cache->cc_tupdesc, cache->cc_nkeys, cache->cc_keyno,
						 arguments, ct->keys);
		MemoryContextSwitchTo(oldcxt);
	}

	/*
	 * Finish initializing the CatCTup header, and add it to the cache's
	 * linked list and counts.
	 */
	ct->ct_magic = CT_MAGIC;
	ct->my_cache = cache;
	ct->c_list = NULL;
	ct->refcount = 0;			/* for the moment */
	ct->dead = false;
	ct->negative = (ntp == NULL);
	ct->hash_value = hashValue;

	dlist_push_head(&cache->cc_bucket[hashIndex], &ct->cache_elem);

	cache->cc_ntup++;
	CacheHdr->ch_ntup++;

	/*
	 * If the hash table has become too full, enlarge the buckets array. Quite
	 * arbitrarily, we enlarge when fill factor > 2.
	 */
	if (cache->cc_ntup > cache->cc_nbuckets * 2)
		RehashCatCache(cache);

	return ct;
}
2277 :
2278 : /*
2279 : * Helper routine that frees keys stored in the keys array.
2280 : */
2281 : static void
2282 584042 : CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos, Datum *keys)
2283 : {
2284 : int i;
2285 :
2286 1798166 : for (i = 0; i < nkeys; i++)
2287 : {
2288 1214124 : int attnum = attnos[i];
2289 : Form_pg_attribute att;
2290 :
2291 : /* system attribute are not supported in caches */
2292 : Assert(attnum > 0);
2293 :
2294 1214124 : att = TupleDescAttr(tupdesc, attnum - 1);
2295 :
2296 1214124 : if (!att->attbyval)
2297 518592 : pfree(DatumGetPointer(keys[i]));
2298 : }
2299 584042 : }
2300 :
2301 : /*
2302 : * Helper routine that copies the keys in the srckeys array into the dstkeys
2303 : * one, guaranteeing that the datums are fully allocated in the current memory
2304 : * context.
2305 : */
2306 : static void
2307 2025866 : CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
2308 : Datum *srckeys, Datum *dstkeys)
2309 : {
2310 : int i;
2311 :
2312 : /*
2313 : * XXX: memory and lookup performance could possibly be improved by
2314 : * storing all keys in one allocation.
2315 : */
2316 :
2317 6336054 : for (i = 0; i < nkeys; i++)
2318 : {
2319 4310188 : int attnum = attnos[i];
2320 4310188 : Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
2321 4310188 : Datum src = srckeys[i];
2322 : NameData srcname;
2323 :
2324 : /*
2325 : * Must be careful in case the caller passed a C string where a NAME
2326 : * is wanted: convert the given argument to a correctly padded NAME.
2327 : * Otherwise the memcpy() done by datumCopy() could fall off the end
2328 : * of memory.
2329 : */
2330 4310188 : if (att->atttypid == NAMEOID)
2331 : {
2332 864856 : namestrcpy(&srcname, DatumGetCString(src));
2333 864856 : src = NameGetDatum(&srcname);
2334 : }
2335 :
2336 4310188 : dstkeys[i] = datumCopy(src,
2337 4310188 : att->attbyval,
2338 4310188 : att->attlen);
2339 : }
2340 2025866 : }
2341 :
2342 : /*
2343 : * PrepareToInvalidateCacheTuple()
2344 : *
2345 : * This is part of a rather subtle chain of events, so pay attention:
2346 : *
2347 : * When a tuple is inserted or deleted, it cannot be flushed from the
2348 : * catcaches immediately, for reasons explained at the top of cache/inval.c.
2349 : * Instead we have to add entry(s) for the tuple to a list of pending tuple
2350 : * invalidations that will be done at the end of the command or transaction.
2351 : *
2352 : * The lists of tuples that need to be flushed are kept by inval.c. This
2353 : * routine is a helper routine for inval.c. Given a tuple belonging to
2354 : * the specified relation, find all catcaches it could be in, compute the
2355 : * correct hash value for each such catcache, and call the specified
2356 : * function to record the cache id and hash value in inval.c's lists.
2357 : * SysCacheInvalidate will be called later, if appropriate,
2358 : * using the recorded information.
2359 : *
2360 : * For an insert or delete, tuple is the target tuple and newtuple is NULL.
2361 : * For an update, we are called just once, with tuple being the old tuple
2362 : * version and newtuple the new version. We should make two list entries
2363 : * if the tuple's hash value changed, but only one if it didn't.
2364 : *
2365 : * Note that it is irrelevant whether the given tuple is actually loaded
2366 : * into the catcache at the moment. Even if it's not there now, it might
2367 : * be by the end of the command, or there might be a matching negative entry
2368 : * to flush --- or other backends' caches might have such entries --- so
2369 : * we have to make list entries to flush it later.
2370 : *
2371 : * Also note that it's not an error if there are no catcaches for the
2372 : * specified relation. inval.c doesn't know exactly which rels have
2373 : * catcaches --- it will call this routine for any tuple that's in a
2374 : * system relation.
2375 : */
2376 : void
2377 3278804 : PrepareToInvalidateCacheTuple(Relation relation,
2378 : HeapTuple tuple,
2379 : HeapTuple newtuple,
2380 : void (*function) (int, uint32, Oid, void *),
2381 : void *context)
2382 : {
2383 : slist_iter iter;
2384 : Oid reloid;
2385 :
2386 : CACHE_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
2387 :
2388 : /*
2389 : * sanity checks
2390 : */
2391 : Assert(RelationIsValid(relation));
2392 : Assert(HeapTupleIsValid(tuple));
2393 : Assert(PointerIsValid(function));
2394 : Assert(CacheHdr != NULL);
2395 :
2396 3278804 : reloid = RelationGetRelid(relation);
2397 :
2398 : /* ----------------
2399 : * for each cache
2400 : * if the cache contains tuples from the specified relation
2401 : * compute the tuple's hash value(s) in this cache,
2402 : * and call the passed function to register the information.
2403 : * ----------------
2404 : */
2405 :
2406 281977144 : slist_foreach(iter, &CacheHdr->ch_caches)
2407 : {
2408 278698340 : CatCache *ccp = slist_container(CatCache, cc_next, iter.cur);
2409 : uint32 hashvalue;
2410 : Oid dbid;
2411 :
2412 278698340 : if (ccp->cc_reloid != reloid)
2413 272708340 : continue;
2414 :
2415 : /* Just in case cache hasn't finished initialization yet... */
2416 5990000 : ConditionalCatalogCacheInitializeCache(ccp);
2417 :
2418 5990000 : hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
2419 5990000 : dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
2420 :
2421 5990000 : (*function) (ccp->id, hashvalue, dbid, context);
2422 :
2423 5990000 : if (newtuple)
2424 : {
2425 : uint32 newhashvalue;
2426 :
2427 433664 : newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
2428 :
2429 433664 : if (newhashvalue != hashvalue)
2430 6208 : (*function) (ccp->id, newhashvalue, dbid, context);
2431 : }
2432 : }
2433 3278804 : }
2434 :
2435 : /* ResourceOwner callbacks */
2436 :
2437 : static void
2438 10770 : ResOwnerReleaseCatCache(Datum res)
2439 : {
2440 10770 : ReleaseCatCacheWithOwner((HeapTuple) DatumGetPointer(res), NULL);
2441 10770 : }
2442 :
2443 : static char *
2444 0 : ResOwnerPrintCatCache(Datum res)
2445 : {
2446 0 : HeapTuple tuple = (HeapTuple) DatumGetPointer(res);
2447 0 : CatCTup *ct = (CatCTup *) (((char *) tuple) -
2448 : offsetof(CatCTup, tuple));
2449 :
2450 : /* Safety check to ensure we were handed a cache entry */
2451 : Assert(ct->ct_magic == CT_MAGIC);
2452 :
2453 0 : return psprintf("cache %s (%d), tuple %u/%u has count %d",
2454 0 : ct->my_cache->cc_relname, ct->my_cache->id,
2455 0 : ItemPointerGetBlockNumber(&(tuple->t_self)),
2456 0 : ItemPointerGetOffsetNumber(&(tuple->t_self)),
2457 : ct->refcount);
2458 : }
2459 :
2460 : static void
2461 36 : ResOwnerReleaseCatCacheList(Datum res)
2462 : {
2463 36 : ReleaseCatCacheListWithOwner((CatCList *) DatumGetPointer(res), NULL);
2464 36 : }
2465 :
2466 : static char *
2467 0 : ResOwnerPrintCatCacheList(Datum res)
2468 : {
2469 0 : CatCList *list = (CatCList *) DatumGetPointer(res);
2470 :
2471 0 : return psprintf("cache %s (%d), list %p has count %d",
2472 0 : list->my_cache->cc_relname, list->my_cache->id,
2473 : list, list->refcount);
2474 : }
|