Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * typcache.c
4 : * POSTGRES type cache code
5 : *
6 : * The type cache exists to speed lookup of certain information about data
7 : * types that is not directly available from a type's pg_type row. For
8 : * example, we use a type's default btree opclass, or the default hash
9 : * opclass if no btree opclass exists, to determine which operators should
10 : * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
11 : *
12 : * Several seemingly-odd choices have been made to support use of the type
13 : * cache by generic array and record handling routines, such as array_eq(),
14 : * record_cmp(), and hash_array(). Because those routines are used as index
15 : * support operations, they cannot leak memory. To allow them to execute
16 : * efficiently, all information that they would like to re-use across calls
17 : * is kept in the type cache.
18 : *
19 : * Once created, a type cache entry lives as long as the backend does, so
20 : * there is no need for a call to release a cache entry. If the type is
21 : * dropped, the cache entry simply becomes wasted storage. This is not
22 : * expected to happen often, and assuming that typcache entries are good
23 : * permanently allows caching pointers to them in long-lived places.
24 : *
25 : * We have some provisions for updating cache entries if the stored data
26 : * becomes obsolete. Core data extracted from the pg_type row is updated
27 : * when we detect updates to pg_type. Information dependent on opclasses is
28 : * cleared if we detect updates to pg_opclass. We also support clearing the
29 : * tuple descriptor and operator/function parts of a rowtype's cache entry,
30 : * since those may need to change as a consequence of ALTER TABLE. Domain
31 : * constraint changes are also tracked properly.
32 : *
33 : *
34 : * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
35 : * Portions Copyright (c) 1994, Regents of the University of California
36 : *
37 : * IDENTIFICATION
38 : * src/backend/utils/cache/typcache.c
39 : *
40 : *-------------------------------------------------------------------------
41 : */
42 : #include "postgres.h"
43 :
44 : #include <limits.h>
45 :
46 : #include "access/hash.h"
47 : #include "access/htup_details.h"
48 : #include "access/nbtree.h"
49 : #include "access/parallel.h"
50 : #include "access/relation.h"
51 : #include "access/session.h"
52 : #include "access/table.h"
53 : #include "catalog/pg_am.h"
54 : #include "catalog/pg_constraint.h"
55 : #include "catalog/pg_enum.h"
56 : #include "catalog/pg_operator.h"
57 : #include "catalog/pg_range.h"
58 : #include "catalog/pg_type.h"
59 : #include "commands/defrem.h"
60 : #include "executor/executor.h"
61 : #include "lib/dshash.h"
62 : #include "optimizer/optimizer.h"
63 : #include "port/pg_bitutils.h"
64 : #include "storage/lwlock.h"
65 : #include "utils/builtins.h"
66 : #include "utils/catcache.h"
67 : #include "utils/fmgroids.h"
68 : #include "utils/inval.h"
69 : #include "utils/lsyscache.h"
70 : #include "utils/memutils.h"
71 : #include "utils/rel.h"
72 : #include "utils/snapmgr.h"
73 : #include "utils/syscache.h"
74 : #include "utils/typcache.h"
75 :
76 :
/*
 * The main type cache hashtable searched by lookup_type_cache.
 * Keyed by type OID; entries, once created, persist for the life of the
 * backend (see the file header comment).
 */
static HTAB *TypeCacheHash = NULL;

/*
 * List of type cache entries for domain types, threaded through
 * TypeCacheEntry.nextDomain.  Presumably lets domain-constraint
 * invalidations visit only domain entries — confirm in
 * TypeCacheConstrCallback.
 */
static TypeCacheEntry *firstDomainTypeEntry = NULL;
82 :
/*
 * Private flag bits in the TypeCacheEntry.flags field.
 *
 * The HAVE_PG_TYPE_DATA bit says the core pg_type-derived fields are
 * valid; lookup_type_cache reloads them when it is clear.  The CHECKED_*
 * bits record that the corresponding catalog lookup has been performed
 * (whether or not it succeeded); the HAVE_ELEM_* / HAVE_FIELD_* bits
 * cache the results of probing an array's element type or a composite's
 * column types for equality/compare/hash support.
 */
#define TCFLAGS_HAVE_PG_TYPE_DATA 0x000001
#define TCFLAGS_CHECKED_BTREE_OPCLASS 0x000002
#define TCFLAGS_CHECKED_HASH_OPCLASS 0x000004
#define TCFLAGS_CHECKED_EQ_OPR 0x000008
#define TCFLAGS_CHECKED_LT_OPR 0x000010
#define TCFLAGS_CHECKED_GT_OPR 0x000020
#define TCFLAGS_CHECKED_CMP_PROC 0x000040
#define TCFLAGS_CHECKED_HASH_PROC 0x000080
#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC 0x000100
#define TCFLAGS_CHECKED_ELEM_PROPERTIES 0x000200
#define TCFLAGS_HAVE_ELEM_EQUALITY 0x000400
#define TCFLAGS_HAVE_ELEM_COMPARE 0x000800
#define TCFLAGS_HAVE_ELEM_HASHING 0x001000
#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING 0x002000
#define TCFLAGS_CHECKED_FIELD_PROPERTIES 0x004000
#define TCFLAGS_HAVE_FIELD_EQUALITY 0x008000
#define TCFLAGS_HAVE_FIELD_COMPARE 0x010000
#define TCFLAGS_HAVE_FIELD_HASHING 0x020000
#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING 0x040000
#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS 0x080000
#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE 0x100000

/*
 * The flags associated with equality/comparison/hashing are all but these:
 * i.e. this mask selects every bit except the pg_type-data and
 * domain-related ones, so those operator-derived bits can be cleared
 * wholesale on opclass invalidation.
 */
#define TCFLAGS_OPERATOR_FLAGS \
	(~(TCFLAGS_HAVE_PG_TYPE_DATA | \
	   TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
	   TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))
111 :
112 : /*
113 : * Data stored about a domain type's constraints. Note that we do not create
114 : * this struct for the common case of a constraint-less domain; we just set
115 : * domainData to NULL to indicate that.
116 : *
117 : * Within a DomainConstraintCache, we store expression plan trees, but the
118 : * check_exprstate fields of the DomainConstraintState nodes are just NULL.
119 : * When needed, expression evaluation nodes are built by flat-copying the
120 : * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
121 : * Such a node tree is not part of the DomainConstraintCache, but is
122 : * considered to belong to a DomainConstraintRef.
123 : */
struct DomainConstraintCache
{
	List	   *constraints;	/* list of DomainConstraintState nodes */
	MemoryContext dccContext;	/* memory context holding all associated data */
	/*
	 * Reference count; decremented via decr_dcc_refcount().  Presumably
	 * dccContext is destroyed when this reaches zero — confirm there.
	 */
	long		dccRefCount;	/* number of references to this struct */
};
130 :
/* Private information to support comparisons of enum values */
typedef struct
{
	Oid			enum_oid;		/* OID of one enum value */
	float4		sort_order;		/* its sort position */
} EnumItem;

typedef struct TypeCacheEnumData
{
	Oid			bitmap_base;	/* OID corresponding to bit 0 of bitmapset */
	Bitmapset  *sorted_values;	/* Set of OIDs known to be in order */
	int			num_values;		/* total number of values in enum */
	/*
	 * One EnumItem per enum value; presumably kept sorted by OID for
	 * binary search (see find_enumitem and enum_oid_cmp) — confirm in
	 * load_enum_cache_data.
	 */
	EnumItem	enum_values[FLEXIBLE_ARRAY_MEMBER];
} TypeCacheEnumData;
145 :
146 : /*
147 : * We use a separate table for storing the definitions of non-anonymous
148 : * record types. Once defined, a record type will be remembered for the
149 : * life of the backend. Subsequent uses of the "same" record type (where
150 : * sameness means equalTupleDescs) will refer to the existing table entry.
151 : *
152 : * Stored record types are remembered in a linear array of TupleDescs,
153 : * which can be indexed quickly with the assigned typmod. There is also
154 : * a hash table to speed searches for matching TupleDescs.
155 : */
156 :
typedef struct RecordCacheEntry
{
	/*
	 * Stored TupleDesc; NOTE(review): this appears to double as the hash
	 * key for RecordCacheHash (hashed/compared by tupdesc contents) —
	 * confirm against the table's HASHCTL setup.
	 */
	TupleDesc	tupdesc;
} RecordCacheEntry;
161 :
162 : /*
163 : * To deal with non-anonymous record types that are exchanged by backends
164 : * involved in a parallel query, we also need a shared version of the above.
165 : */
/*
 * Control structure for the shared record-typmod registry used by
 * parallel query.  Holds handles for two dshash tables plus the typmod
 * allocation counter; the tables themselves live in DSA memory.
 */
struct SharedRecordTypmodRegistry
{
	/* A hash table for finding a matching TupleDesc. */
	dshash_table_handle record_table_handle;
	/* A hash table for finding a TupleDesc by typmod. */
	dshash_table_handle typmod_table_handle;
	/* A source of new record typmod numbers. */
	pg_atomic_uint32 next_typmod;
};
175 :
176 : /*
177 : * When using shared tuple descriptors as hash table keys we need a way to be
178 : * able to search for an equal shared TupleDesc using a backend-local
179 : * TupleDesc. So we use this type which can hold either, and hash and compare
180 : * functions that know how to handle both.
181 : */
typedef struct SharedRecordTableKey
{
	union
	{
		TupleDesc	local_tupdesc;	/* valid when !shared */
		dsa_pointer shared_tupdesc; /* valid when shared; resolve with
									 * dsa_get_address */
	}			u;
	bool		shared;			/* discriminates the union above */
} SharedRecordTableKey;
191 :
192 : /*
193 : * The shared version of RecordCacheEntry. This lets us look up a typmod
194 : * using a TupleDesc which may be in local or shared memory.
195 : */
typedef struct SharedRecordTableEntry
{
	/*
	 * The key is the whole entry: it can reference a local or shared
	 * TupleDesc (see SharedRecordTableKey), letting lookups be made with
	 * a backend-local descriptor.
	 */
	SharedRecordTableKey key;
} SharedRecordTableEntry;

/*
 * An entry in SharedRecordTypmodRegistry's typmod table.  This lets us look
 * up a TupleDesc in shared memory using a typmod.
 */
typedef struct SharedTypmodTableEntry
{
	uint32		typmod;			/* assigned record typmod (hash key) */
	dsa_pointer shared_tupdesc; /* DSA pointer to the shared TupleDesc */
} SharedTypmodTableEntry;
210 :
211 : /*
212 : * A comparator function for SharedRecordTableKey.
213 : */
214 : static int
215 24 : shared_record_table_compare(const void *a, const void *b, size_t size,
216 : void *arg)
217 : {
218 24 : dsa_area *area = (dsa_area *) arg;
219 24 : SharedRecordTableKey *k1 = (SharedRecordTableKey *) a;
220 24 : SharedRecordTableKey *k2 = (SharedRecordTableKey *) b;
221 : TupleDesc t1;
222 : TupleDesc t2;
223 :
224 24 : if (k1->shared)
225 0 : t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
226 : else
227 24 : t1 = k1->u.local_tupdesc;
228 :
229 24 : if (k2->shared)
230 24 : t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
231 : else
232 0 : t2 = k2->u.local_tupdesc;
233 :
234 24 : return equalTupleDescs(t1, t2) ? 0 : 1;
235 : }
236 :
237 : /*
238 : * A hash function for SharedRecordTableKey.
239 : */
240 : static uint32
241 114 : shared_record_table_hash(const void *a, size_t size, void *arg)
242 : {
243 114 : dsa_area *area = (dsa_area *) arg;
244 114 : SharedRecordTableKey *k = (SharedRecordTableKey *) a;
245 : TupleDesc t;
246 :
247 114 : if (k->shared)
248 0 : t = (TupleDesc) dsa_get_address(area, k->u.shared_tupdesc);
249 : else
250 114 : t = k->u.local_tupdesc;
251 :
252 114 : return hashTupleDesc(t);
253 : }
254 :
/*
 * Parameters for SharedRecordTypmodRegistry's TupleDesc table.
 * The key size is unused here because the custom compare/hash functions
 * above interpret the key themselves (it may reference local or shared
 * memory), rather than treating it as a flat byte string.
 */
static const dshash_parameters srtr_record_table_params = {
	sizeof(SharedRecordTableKey),	/* unused */
	sizeof(SharedRecordTableEntry),
	shared_record_table_compare,
	shared_record_table_hash,
	LWTRANCHE_PER_SESSION_RECORD_TYPE
};

/*
 * Parameters for SharedRecordTypmodRegistry's typmod hash table.
 * Keys are plain uint32 typmods, so memcmp/memhash suffice.
 */
static const dshash_parameters srtr_typmod_table_params = {
	sizeof(uint32),
	sizeof(SharedTypmodTableEntry),
	dshash_memcmp,
	dshash_memhash,
	LWTRANCHE_PER_SESSION_RECORD_TYPMOD
};
272 :
273 : /* hashtable for recognizing registered record types */
274 : static HTAB *RecordCacheHash = NULL;
275 :
276 : typedef struct RecordCacheArrayEntry
277 : {
278 : uint64 id;
279 : TupleDesc tupdesc;
280 : } RecordCacheArrayEntry;
281 :
282 : /* array of info about registered record types, indexed by assigned typmod */
283 : static RecordCacheArrayEntry *RecordCacheArray = NULL;
284 : static int32 RecordCacheArrayLen = 0; /* allocated length of above array */
285 : static int32 NextRecordTypmod = 0; /* number of entries used */
286 :
287 : /*
288 : * Process-wide counter for generating unique tupledesc identifiers.
289 : * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
290 : * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
291 : */
292 : static uint64 tupledesc_id_counter = INVALID_TUPLEDESC_IDENTIFIER;
293 :
294 : static void load_typcache_tupdesc(TypeCacheEntry *typentry);
295 : static void load_rangetype_info(TypeCacheEntry *typentry);
296 : static void load_multirangetype_info(TypeCacheEntry *typentry);
297 : static void load_domaintype_info(TypeCacheEntry *typentry);
298 : static int dcs_cmp(const void *a, const void *b);
299 : static void decr_dcc_refcount(DomainConstraintCache *dcc);
300 : static void dccref_deletion_callback(void *arg);
301 : static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
302 : static bool array_element_has_equality(TypeCacheEntry *typentry);
303 : static bool array_element_has_compare(TypeCacheEntry *typentry);
304 : static bool array_element_has_hashing(TypeCacheEntry *typentry);
305 : static bool array_element_has_extended_hashing(TypeCacheEntry *typentry);
306 : static void cache_array_element_properties(TypeCacheEntry *typentry);
307 : static bool record_fields_have_equality(TypeCacheEntry *typentry);
308 : static bool record_fields_have_compare(TypeCacheEntry *typentry);
309 : static bool record_fields_have_hashing(TypeCacheEntry *typentry);
310 : static bool record_fields_have_extended_hashing(TypeCacheEntry *typentry);
311 : static void cache_record_field_properties(TypeCacheEntry *typentry);
312 : static bool range_element_has_hashing(TypeCacheEntry *typentry);
313 : static bool range_element_has_extended_hashing(TypeCacheEntry *typentry);
314 : static void cache_range_element_properties(TypeCacheEntry *typentry);
315 : static bool multirange_element_has_hashing(TypeCacheEntry *typentry);
316 : static bool multirange_element_has_extended_hashing(TypeCacheEntry *typentry);
317 : static void cache_multirange_element_properties(TypeCacheEntry *typentry);
318 : static void TypeCacheRelCallback(Datum arg, Oid relid);
319 : static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue);
320 : static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
321 : static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
322 : static void load_enum_cache_data(TypeCacheEntry *tcache);
323 : static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
324 : static int enum_oid_cmp(const void *left, const void *right);
325 : static void shared_record_typmod_registry_detach(dsm_segment *segment,
326 : Datum datum);
327 : static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc);
328 : static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
329 : uint32 typmod);
330 :
331 :
332 : /*
333 : * lookup_type_cache
334 : *
335 : * Fetch the type cache entry for the specified datatype, and make sure that
336 : * all the fields requested by bits in 'flags' are valid.
337 : *
338 : * The result is never NULL --- we will ereport() if the passed type OID is
339 : * invalid. Note however that we may fail to find one or more of the
340 : * values requested by 'flags'; the caller needs to check whether the fields
341 : * are InvalidOid or not.
342 : */
343 : TypeCacheEntry *
344 561338 : lookup_type_cache(Oid type_id, int flags)
345 : {
346 : TypeCacheEntry *typentry;
347 : bool found;
348 :
349 561338 : if (TypeCacheHash == NULL)
350 : {
351 : /* First time through: initialize the hash table */
352 : HASHCTL ctl;
353 :
354 5872 : ctl.keysize = sizeof(Oid);
355 5872 : ctl.entrysize = sizeof(TypeCacheEntry);
356 5872 : TypeCacheHash = hash_create("Type information cache", 64,
357 : &ctl, HASH_ELEM | HASH_BLOBS);
358 :
359 : /* Also set up callbacks for SI invalidations */
360 5872 : CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
361 5872 : CacheRegisterSyscacheCallback(TYPEOID, TypeCacheTypCallback, (Datum) 0);
362 5872 : CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
363 5872 : CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);
364 :
365 : /* Also make sure CacheMemoryContext exists */
366 5872 : if (!CacheMemoryContext)
367 0 : CreateCacheMemoryContext();
368 : }
369 :
370 : /* Try to look up an existing entry */
371 561338 : typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
372 : &type_id,
373 : HASH_FIND, NULL);
374 561338 : if (typentry == NULL)
375 : {
376 : /*
377 : * If we didn't find one, we want to make one. But first look up the
378 : * pg_type row, just to make sure we don't make a cache entry for an
379 : * invalid type OID. If the type OID is not valid, present a
380 : * user-facing error, since some code paths such as domain_in() allow
381 : * this function to be reached with a user-supplied OID.
382 : */
383 : HeapTuple tp;
384 : Form_pg_type typtup;
385 :
386 25582 : tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
387 25582 : if (!HeapTupleIsValid(tp))
388 0 : ereport(ERROR,
389 : (errcode(ERRCODE_UNDEFINED_OBJECT),
390 : errmsg("type with OID %u does not exist", type_id)));
391 25582 : typtup = (Form_pg_type) GETSTRUCT(tp);
392 25582 : if (!typtup->typisdefined)
393 0 : ereport(ERROR,
394 : (errcode(ERRCODE_UNDEFINED_OBJECT),
395 : errmsg("type \"%s\" is only a shell",
396 : NameStr(typtup->typname))));
397 :
398 : /* Now make the typcache entry */
399 25582 : typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
400 : &type_id,
401 : HASH_ENTER, &found);
402 : Assert(!found); /* it wasn't there a moment ago */
403 :
404 1586084 : MemSet(typentry, 0, sizeof(TypeCacheEntry));
405 :
406 : /* These fields can never change, by definition */
407 25582 : typentry->type_id = type_id;
408 25582 : typentry->type_id_hash = GetSysCacheHashValue1(TYPEOID,
409 : ObjectIdGetDatum(type_id));
410 :
411 : /* Keep this part in sync with the code below */
412 25582 : typentry->typlen = typtup->typlen;
413 25582 : typentry->typbyval = typtup->typbyval;
414 25582 : typentry->typalign = typtup->typalign;
415 25582 : typentry->typstorage = typtup->typstorage;
416 25582 : typentry->typtype = typtup->typtype;
417 25582 : typentry->typrelid = typtup->typrelid;
418 25582 : typentry->typsubscript = typtup->typsubscript;
419 25582 : typentry->typelem = typtup->typelem;
420 25582 : typentry->typcollation = typtup->typcollation;
421 25582 : typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
422 :
423 : /* If it's a domain, immediately thread it into the domain cache list */
424 25582 : if (typentry->typtype == TYPTYPE_DOMAIN)
425 : {
426 1104 : typentry->nextDomain = firstDomainTypeEntry;
427 1104 : firstDomainTypeEntry = typentry;
428 : }
429 :
430 25582 : ReleaseSysCache(tp);
431 : }
432 535756 : else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
433 : {
434 : /*
435 : * We have an entry, but its pg_type row got changed, so reload the
436 : * data obtained directly from pg_type.
437 : */
438 : HeapTuple tp;
439 : Form_pg_type typtup;
440 :
441 450 : tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
442 450 : if (!HeapTupleIsValid(tp))
443 0 : ereport(ERROR,
444 : (errcode(ERRCODE_UNDEFINED_OBJECT),
445 : errmsg("type with OID %u does not exist", type_id)));
446 450 : typtup = (Form_pg_type) GETSTRUCT(tp);
447 450 : if (!typtup->typisdefined)
448 0 : ereport(ERROR,
449 : (errcode(ERRCODE_UNDEFINED_OBJECT),
450 : errmsg("type \"%s\" is only a shell",
451 : NameStr(typtup->typname))));
452 :
453 : /*
454 : * Keep this part in sync with the code above. Many of these fields
455 : * shouldn't ever change, particularly typtype, but copy 'em anyway.
456 : */
457 450 : typentry->typlen = typtup->typlen;
458 450 : typentry->typbyval = typtup->typbyval;
459 450 : typentry->typalign = typtup->typalign;
460 450 : typentry->typstorage = typtup->typstorage;
461 450 : typentry->typtype = typtup->typtype;
462 450 : typentry->typrelid = typtup->typrelid;
463 450 : typentry->typsubscript = typtup->typsubscript;
464 450 : typentry->typelem = typtup->typelem;
465 450 : typentry->typcollation = typtup->typcollation;
466 450 : typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
467 :
468 450 : ReleaseSysCache(tp);
469 : }
470 :
471 : /*
472 : * Look up opclasses if we haven't already and any dependent info is
473 : * requested.
474 : */
475 561338 : if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR | TYPECACHE_GT_OPR |
476 : TYPECACHE_CMP_PROC |
477 : TYPECACHE_EQ_OPR_FINFO | TYPECACHE_CMP_PROC_FINFO |
478 348070 : TYPECACHE_BTREE_OPFAMILY)) &&
479 348070 : !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
480 : {
481 : Oid opclass;
482 :
483 21918 : opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
484 21918 : if (OidIsValid(opclass))
485 : {
486 21218 : typentry->btree_opf = get_opclass_family(opclass);
487 21218 : typentry->btree_opintype = get_opclass_input_type(opclass);
488 : }
489 : else
490 : {
491 700 : typentry->btree_opf = typentry->btree_opintype = InvalidOid;
492 : }
493 :
494 : /*
495 : * Reset information derived from btree opclass. Note in particular
496 : * that we'll redetermine the eq_opr even if we previously found one;
497 : * this matters in case a btree opclass has been added to a type that
498 : * previously had only a hash opclass.
499 : */
500 21918 : typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
501 : TCFLAGS_CHECKED_LT_OPR |
502 : TCFLAGS_CHECKED_GT_OPR |
503 : TCFLAGS_CHECKED_CMP_PROC);
504 21918 : typentry->flags |= TCFLAGS_CHECKED_BTREE_OPCLASS;
505 : }
506 :
507 : /*
508 : * If we need to look up equality operator, and there's no btree opclass,
509 : * force lookup of hash opclass.
510 : */
511 561338 : if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
512 327906 : !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
513 21730 : typentry->btree_opf == InvalidOid)
514 700 : flags |= TYPECACHE_HASH_OPFAMILY;
515 :
516 561338 : if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO |
517 : TYPECACHE_HASH_EXTENDED_PROC |
518 : TYPECACHE_HASH_EXTENDED_PROC_FINFO |
519 233144 : TYPECACHE_HASH_OPFAMILY)) &&
520 233144 : !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
521 : {
522 : Oid opclass;
523 :
524 16624 : opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
525 16624 : if (OidIsValid(opclass))
526 : {
527 16392 : typentry->hash_opf = get_opclass_family(opclass);
528 16392 : typentry->hash_opintype = get_opclass_input_type(opclass);
529 : }
530 : else
531 : {
532 232 : typentry->hash_opf = typentry->hash_opintype = InvalidOid;
533 : }
534 :
535 : /*
536 : * Reset information derived from hash opclass. We do *not* reset the
537 : * eq_opr; if we already found one from the btree opclass, that
538 : * decision is still good.
539 : */
540 16624 : typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
541 : TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
542 16624 : typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
543 : }
544 :
545 : /*
546 : * Look for requested operators and functions, if we haven't already.
547 : */
548 561338 : if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
549 327906 : !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
550 : {
551 21730 : Oid eq_opr = InvalidOid;
552 :
553 21730 : if (typentry->btree_opf != InvalidOid)
554 21030 : eq_opr = get_opfamily_member(typentry->btree_opf,
555 : typentry->btree_opintype,
556 : typentry->btree_opintype,
557 : BTEqualStrategyNumber);
558 21730 : if (eq_opr == InvalidOid &&
559 700 : typentry->hash_opf != InvalidOid)
560 536 : eq_opr = get_opfamily_member(typentry->hash_opf,
561 : typentry->hash_opintype,
562 : typentry->hash_opintype,
563 : HTEqualStrategyNumber);
564 :
565 : /*
566 : * If the proposed equality operator is array_eq or record_eq, check
567 : * to see if the element type or column types support equality. If
568 : * not, array_eq or record_eq would fail at runtime, so we don't want
569 : * to report that the type has equality. (We can omit similar
570 : * checking for ranges and multiranges because ranges can't be created
571 : * in the first place unless their subtypes support equality.)
572 : */
573 21730 : if (eq_opr == ARRAY_EQ_OP &&
574 2022 : !array_element_has_equality(typentry))
575 222 : eq_opr = InvalidOid;
576 21508 : else if (eq_opr == RECORD_EQ_OP &&
577 340 : !record_fields_have_equality(typentry))
578 126 : eq_opr = InvalidOid;
579 :
580 : /* Force update of eq_opr_finfo only if we're changing state */
581 21730 : if (typentry->eq_opr != eq_opr)
582 20368 : typentry->eq_opr_finfo.fn_oid = InvalidOid;
583 :
584 21730 : typentry->eq_opr = eq_opr;
585 :
586 : /*
587 : * Reset info about hash functions whenever we pick up new info about
588 : * equality operator. This is so we can ensure that the hash
589 : * functions match the operator.
590 : */
591 21730 : typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
592 : TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
593 21730 : typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
594 : }
595 561338 : if ((flags & TYPECACHE_LT_OPR) &&
596 201892 : !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
597 : {
598 13068 : Oid lt_opr = InvalidOid;
599 :
600 13068 : if (typentry->btree_opf != InvalidOid)
601 12814 : lt_opr = get_opfamily_member(typentry->btree_opf,
602 : typentry->btree_opintype,
603 : typentry->btree_opintype,
604 : BTLessStrategyNumber);
605 :
606 : /*
607 : * As above, make sure array_cmp or record_cmp will succeed; but again
608 : * we need no special check for ranges or multiranges.
609 : */
610 13068 : if (lt_opr == ARRAY_LT_OP &&
611 1578 : !array_element_has_compare(typentry))
612 340 : lt_opr = InvalidOid;
613 12728 : else if (lt_opr == RECORD_LT_OP &&
614 114 : !record_fields_have_compare(typentry))
615 12 : lt_opr = InvalidOid;
616 :
617 13068 : typentry->lt_opr = lt_opr;
618 13068 : typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
619 : }
620 561338 : if ((flags & TYPECACHE_GT_OPR) &&
621 197190 : !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
622 : {
623 13034 : Oid gt_opr = InvalidOid;
624 :
625 13034 : if (typentry->btree_opf != InvalidOid)
626 12780 : gt_opr = get_opfamily_member(typentry->btree_opf,
627 : typentry->btree_opintype,
628 : typentry->btree_opintype,
629 : BTGreaterStrategyNumber);
630 :
631 : /*
632 : * As above, make sure array_cmp or record_cmp will succeed; but again
633 : * we need no special check for ranges or multiranges.
634 : */
635 13034 : if (gt_opr == ARRAY_GT_OP &&
636 1572 : !array_element_has_compare(typentry))
637 340 : gt_opr = InvalidOid;
638 12694 : else if (gt_opr == RECORD_GT_OP &&
639 114 : !record_fields_have_compare(typentry))
640 12 : gt_opr = InvalidOid;
641 :
642 13034 : typentry->gt_opr = gt_opr;
643 13034 : typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
644 : }
645 561338 : if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
646 17872 : !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
647 : {
648 3020 : Oid cmp_proc = InvalidOid;
649 :
650 3020 : if (typentry->btree_opf != InvalidOid)
651 2890 : cmp_proc = get_opfamily_proc(typentry->btree_opf,
652 : typentry->btree_opintype,
653 : typentry->btree_opintype,
654 : BTORDER_PROC);
655 :
656 : /*
657 : * As above, make sure array_cmp or record_cmp will succeed; but again
658 : * we need no special check for ranges or multiranges.
659 : */
660 3020 : if (cmp_proc == F_BTARRAYCMP &&
661 568 : !array_element_has_compare(typentry))
662 108 : cmp_proc = InvalidOid;
663 2912 : else if (cmp_proc == F_BTRECORDCMP &&
664 176 : !record_fields_have_compare(typentry))
665 108 : cmp_proc = InvalidOid;
666 :
667 : /* Force update of cmp_proc_finfo only if we're changing state */
668 3020 : if (typentry->cmp_proc != cmp_proc)
669 2644 : typentry->cmp_proc_finfo.fn_oid = InvalidOid;
670 :
671 3020 : typentry->cmp_proc = cmp_proc;
672 3020 : typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
673 : }
674 561338 : if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) &&
675 232566 : !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
676 : {
677 16182 : Oid hash_proc = InvalidOid;
678 :
679 : /*
680 : * We insist that the eq_opr, if one has been determined, match the
681 : * hash opclass; else report there is no hash function.
682 : */
683 16182 : if (typentry->hash_opf != InvalidOid &&
684 31722 : (!OidIsValid(typentry->eq_opr) ||
685 15716 : typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
686 : typentry->hash_opintype,
687 : typentry->hash_opintype,
688 : HTEqualStrategyNumber)))
689 16006 : hash_proc = get_opfamily_proc(typentry->hash_opf,
690 : typentry->hash_opintype,
691 : typentry->hash_opintype,
692 : HASHSTANDARD_PROC);
693 :
694 : /*
695 : * As above, make sure hash_array, hash_record, or hash_range will
696 : * succeed.
697 : */
698 16182 : if (hash_proc == F_HASH_ARRAY &&
699 1166 : !array_element_has_hashing(typentry))
700 120 : hash_proc = InvalidOid;
701 16062 : else if (hash_proc == F_HASH_RECORD &&
702 330 : !record_fields_have_hashing(typentry))
703 152 : hash_proc = InvalidOid;
704 15910 : else if (hash_proc == F_HASH_RANGE &&
705 20 : !range_element_has_hashing(typentry))
706 6 : hash_proc = InvalidOid;
707 :
708 : /*
709 : * Likewise for hash_multirange.
710 : */
711 16182 : if (hash_proc == F_HASH_MULTIRANGE &&
712 12 : !multirange_element_has_hashing(typentry))
713 6 : hash_proc = InvalidOid;
714 :
715 : /* Force update of hash_proc_finfo only if we're changing state */
716 16182 : if (typentry->hash_proc != hash_proc)
717 14958 : typentry->hash_proc_finfo.fn_oid = InvalidOid;
718 :
719 16182 : typentry->hash_proc = hash_proc;
720 16182 : typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
721 : }
722 561338 : if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
723 5538 : TYPECACHE_HASH_EXTENDED_PROC_FINFO)) &&
724 5538 : !(typentry->flags & TCFLAGS_CHECKED_HASH_EXTENDED_PROC))
725 : {
726 2442 : Oid hash_extended_proc = InvalidOid;
727 :
728 : /*
729 : * We insist that the eq_opr, if one has been determined, match the
730 : * hash opclass; else report there is no hash function.
731 : */
732 2442 : if (typentry->hash_opf != InvalidOid &&
733 4564 : (!OidIsValid(typentry->eq_opr) ||
734 2158 : typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
735 : typentry->hash_opintype,
736 : typentry->hash_opintype,
737 : HTEqualStrategyNumber)))
738 2406 : hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
739 : typentry->hash_opintype,
740 : typentry->hash_opintype,
741 : HASHEXTENDED_PROC);
742 :
743 : /*
744 : * As above, make sure hash_array_extended, hash_record_extended, or
745 : * hash_range_extended will succeed.
746 : */
747 2442 : if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
748 228 : !array_element_has_extended_hashing(typentry))
749 108 : hash_extended_proc = InvalidOid;
750 2334 : else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
751 122 : !record_fields_have_extended_hashing(typentry))
752 114 : hash_extended_proc = InvalidOid;
753 2220 : else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
754 0 : !range_element_has_extended_hashing(typentry))
755 0 : hash_extended_proc = InvalidOid;
756 :
757 : /*
758 : * Likewise for hash_multirange_extended.
759 : */
760 2442 : if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
761 0 : !multirange_element_has_extended_hashing(typentry))
762 0 : hash_extended_proc = InvalidOid;
763 :
764 : /* Force update of proc finfo only if we're changing state */
765 2442 : if (typentry->hash_extended_proc != hash_extended_proc)
766 2176 : typentry->hash_extended_proc_finfo.fn_oid = InvalidOid;
767 :
768 2442 : typentry->hash_extended_proc = hash_extended_proc;
769 2442 : typentry->flags |= TCFLAGS_CHECKED_HASH_EXTENDED_PROC;
770 : }
771 :
772 : /*
773 : * Set up fmgr lookup info as requested
774 : *
775 : * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
776 : * which is not quite right (they're really in the hash table's private
777 : * memory context) but this will do for our purposes.
778 : *
779 : * Note: the code above avoids invalidating the finfo structs unless the
780 : * referenced operator/function OID actually changes. This is to prevent
781 : * unnecessary leakage of any subsidiary data attached to an finfo, since
782 : * that would cause session-lifespan memory leaks.
783 : */
784 561338 : if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
785 4518 : typentry->eq_opr_finfo.fn_oid == InvalidOid &&
786 1278 : typentry->eq_opr != InvalidOid)
787 : {
788 : Oid eq_opr_func;
789 :
790 1272 : eq_opr_func = get_opcode(typentry->eq_opr);
791 1272 : if (eq_opr_func != InvalidOid)
792 1272 : fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
793 : CacheMemoryContext);
794 : }
795 561338 : if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
796 9568 : typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
797 2686 : typentry->cmp_proc != InvalidOid)
798 : {
799 1058 : fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
800 : CacheMemoryContext);
801 : }
802 561338 : if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
803 5320 : typentry->hash_proc_finfo.fn_oid == InvalidOid &&
804 1122 : typentry->hash_proc != InvalidOid)
805 : {
806 984 : fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
807 : CacheMemoryContext);
808 : }
809 561338 : if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) &&
810 106 : typentry->hash_extended_proc_finfo.fn_oid == InvalidOid &&
811 32 : typentry->hash_extended_proc != InvalidOid)
812 : {
813 20 : fmgr_info_cxt(typentry->hash_extended_proc,
814 : &typentry->hash_extended_proc_finfo,
815 : CacheMemoryContext);
816 : }
817 :
818 : /*
819 : * If it's a composite type (row type), get tupdesc if requested
820 : */
821 561338 : if ((flags & TYPECACHE_TUPDESC) &&
822 76622 : typentry->tupDesc == NULL &&
823 3268 : typentry->typtype == TYPTYPE_COMPOSITE)
824 : {
825 3142 : load_typcache_tupdesc(typentry);
826 : }
827 :
828 : /*
829 : * If requested, get information about a range type
830 : *
831 : * This includes making sure that the basic info about the range element
832 : * type is up-to-date.
833 : */
834 561338 : if ((flags & TYPECACHE_RANGE_INFO) &&
835 13378 : typentry->typtype == TYPTYPE_RANGE)
836 : {
837 13378 : if (typentry->rngelemtype == NULL)
838 434 : load_rangetype_info(typentry);
839 12944 : else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
840 0 : (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
841 : }
842 :
843 : /*
844 : * If requested, get information about a multirange type
845 : */
846 561338 : if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
847 7820 : typentry->rngtype == NULL &&
848 192 : typentry->typtype == TYPTYPE_MULTIRANGE)
849 : {
850 192 : load_multirangetype_info(typentry);
851 : }
852 :
853 : /*
854 : * If requested, get information about a domain type
855 : */
856 561338 : if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
857 7390 : typentry->domainBaseType == InvalidOid &&
858 4754 : typentry->typtype == TYPTYPE_DOMAIN)
859 : {
860 336 : typentry->domainBaseTypmod = -1;
861 336 : typentry->domainBaseType =
862 336 : getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
863 : }
864 561338 : if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
865 31990 : (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
866 5256 : typentry->typtype == TYPTYPE_DOMAIN)
867 : {
868 2030 : load_domaintype_info(typentry);
869 : }
870 :
871 561338 : return typentry;
872 : }
873 :
874 : /*
875 : * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
876 : */
static void
load_typcache_tupdesc(TypeCacheEntry *typentry)
{
	Relation	rel;

	/* A composite type must be backed by a relation (its pg_class row) */
	if (!OidIsValid(typentry->typrelid))	/* should not happen */
		elog(ERROR, "invalid typrelid for composite type %u",
			 typentry->type_id);
	rel = relation_open(typentry->typrelid, AccessShareLock);
	Assert(rel->rd_rel->reltype == typentry->type_id);

	/*
	 * Link to the tupdesc and increment its refcount (we assert it's a
	 * refcounted descriptor).  We don't use IncrTupleDescRefCount() for this,
	 * because the reference mustn't be entered in the current resource owner;
	 * it can outlive the current query.
	 */
	typentry->tupDesc = RelationGetDescr(rel);

	Assert(typentry->tupDesc->tdrefcount > 0);
	typentry->tupDesc->tdrefcount++;

	/*
	 * In future, we could take some pains to not change tupDesc_identifier if
	 * the tupdesc didn't really change; but for now it's not worth it.
	 */
	typentry->tupDesc_identifier = ++tupledesc_id_counter;

	/* Lock can be released now; the tupdesc reference keeps the data alive */
	relation_close(rel, AccessShareLock);
}
907 :
908 : /*
909 : * load_rangetype_info --- helper routine to set up range type information
910 : */
static void
load_rangetype_info(TypeCacheEntry *typentry)
{
	Form_pg_range pg_range;
	HeapTuple	tup;
	Oid			subtypeOid;
	Oid			opclassOid;
	Oid			canonicalOid;
	Oid			subdiffOid;
	Oid			opfamilyOid;
	Oid			opcintype;
	Oid			cmpFnOid;

	/* get information from pg_range */
	tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id));
	/* should not fail, since we already checked typtype ... */
	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for range type %u",
			 typentry->type_id);
	pg_range = (Form_pg_range) GETSTRUCT(tup);

	/* Copy the fields we need before releasing the syscache tuple */
	subtypeOid = pg_range->rngsubtype;
	typentry->rng_collation = pg_range->rngcollation;
	opclassOid = pg_range->rngsubopc;
	canonicalOid = pg_range->rngcanonical;
	subdiffOid = pg_range->rngsubdiff;

	ReleaseSysCache(tup);

	/* get opclass properties and look up the comparison function */
	opfamilyOid = get_opclass_family(opclassOid);
	opcintype = get_opclass_input_type(opclassOid);

	/* A range type's subtype opclass must supply a btree comparison proc */
	cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
								 BTORDER_PROC);
	if (!RegProcedureIsValid(cmpFnOid))
		elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
			 BTORDER_PROC, opcintype, opcintype, opfamilyOid);

	/* set up cached fmgrinfo structs */
	fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
				  CacheMemoryContext);
	/* canonical and subdiff functions are optional per pg_range */
	if (OidIsValid(canonicalOid))
		fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
					  CacheMemoryContext);
	if (OidIsValid(subdiffOid))
		fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
					  CacheMemoryContext);

	/* Lastly, set up link to the element type --- this marks data valid */
	typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
}
963 :
964 : /*
965 : * load_multirangetype_info --- helper routine to set up multirange type
966 : * information
967 : */
968 : static void
969 192 : load_multirangetype_info(TypeCacheEntry *typentry)
970 : {
971 : Oid rangetypeOid;
972 :
973 192 : rangetypeOid = get_multirange_range(typentry->type_id);
974 192 : if (!OidIsValid(rangetypeOid))
975 0 : elog(ERROR, "cache lookup failed for multirange type %u",
976 : typentry->type_id);
977 :
978 192 : typentry->rngtype = lookup_type_cache(rangetypeOid, TYPECACHE_RANGE_INFO);
979 192 : }
980 :
981 : /*
982 : * load_domaintype_info --- helper routine to set up domain constraint info
983 : *
984 : * Note: we assume we're called in a relatively short-lived context, so it's
985 : * okay to leak data into the current context while scanning pg_constraint.
986 : * We build the new DomainConstraintCache data in a context underneath
987 : * CurrentMemoryContext, and reparent it under CacheMemoryContext when
988 : * complete.
989 : */
990 : static void
991 2030 : load_domaintype_info(TypeCacheEntry *typentry)
992 : {
993 2030 : Oid typeOid = typentry->type_id;
994 : DomainConstraintCache *dcc;
995 2030 : bool notNull = false;
996 : DomainConstraintState **ccons;
997 : int cconslen;
998 : Relation conRel;
999 : MemoryContext oldcxt;
1000 :
1001 : /*
1002 : * If we're here, any existing constraint info is stale, so release it.
1003 : * For safety, be sure to null the link before trying to delete the data.
1004 : */
1005 2030 : if (typentry->domainData)
1006 : {
1007 492 : dcc = typentry->domainData;
1008 492 : typentry->domainData = NULL;
1009 492 : decr_dcc_refcount(dcc);
1010 : }
1011 :
1012 : /*
1013 : * We try to optimize the common case of no domain constraints, so don't
1014 : * create the dcc object and context until we find a constraint. Likewise
1015 : * for the temp sorting array.
1016 : */
1017 2030 : dcc = NULL;
1018 2030 : ccons = NULL;
1019 2030 : cconslen = 0;
1020 :
1021 : /*
1022 : * Scan pg_constraint for relevant constraints. We want to find
1023 : * constraints for not just this domain, but any ancestor domains, so the
1024 : * outer loop crawls up the domain stack.
1025 : */
1026 2030 : conRel = table_open(ConstraintRelationId, AccessShareLock);
1027 :
1028 : for (;;)
1029 2072 : {
1030 : HeapTuple tup;
1031 : HeapTuple conTup;
1032 : Form_pg_type typTup;
1033 4102 : int nccons = 0;
1034 : ScanKeyData key[1];
1035 : SysScanDesc scan;
1036 :
1037 4102 : tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
1038 4102 : if (!HeapTupleIsValid(tup))
1039 0 : elog(ERROR, "cache lookup failed for type %u", typeOid);
1040 4102 : typTup = (Form_pg_type) GETSTRUCT(tup);
1041 :
1042 4102 : if (typTup->typtype != TYPTYPE_DOMAIN)
1043 : {
1044 : /* Not a domain, so done */
1045 2030 : ReleaseSysCache(tup);
1046 2030 : break;
1047 : }
1048 :
1049 : /* Test for NOT NULL Constraint */
1050 2072 : if (typTup->typnotnull)
1051 90 : notNull = true;
1052 :
1053 : /* Look for CHECK Constraints on this domain */
1054 2072 : ScanKeyInit(&key[0],
1055 : Anum_pg_constraint_contypid,
1056 : BTEqualStrategyNumber, F_OIDEQ,
1057 : ObjectIdGetDatum(typeOid));
1058 :
1059 2072 : scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
1060 : NULL, 1, key);
1061 :
1062 2978 : while (HeapTupleIsValid(conTup = systable_getnext(scan)))
1063 : {
1064 906 : Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
1065 : Datum val;
1066 : bool isNull;
1067 : char *constring;
1068 : Expr *check_expr;
1069 : DomainConstraintState *r;
1070 :
1071 : /* Ignore non-CHECK constraints (presently, shouldn't be any) */
1072 906 : if (c->contype != CONSTRAINT_CHECK)
1073 0 : continue;
1074 :
1075 : /* Not expecting conbin to be NULL, but we'll test for it anyway */
1076 906 : val = fastgetattr(conTup, Anum_pg_constraint_conbin,
1077 : conRel->rd_att, &isNull);
1078 906 : if (isNull)
1079 0 : elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
1080 : NameStr(typTup->typname), NameStr(c->conname));
1081 :
1082 : /* Convert conbin to C string in caller context */
1083 906 : constring = TextDatumGetCString(val);
1084 :
1085 : /* Create the DomainConstraintCache object and context if needed */
1086 906 : if (dcc == NULL)
1087 : {
1088 : MemoryContext cxt;
1089 :
1090 874 : cxt = AllocSetContextCreate(CurrentMemoryContext,
1091 : "Domain constraints",
1092 : ALLOCSET_SMALL_SIZES);
1093 : dcc = (DomainConstraintCache *)
1094 874 : MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
1095 874 : dcc->constraints = NIL;
1096 874 : dcc->dccContext = cxt;
1097 874 : dcc->dccRefCount = 0;
1098 : }
1099 :
1100 : /* Create node trees in DomainConstraintCache's context */
1101 906 : oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1102 :
1103 906 : check_expr = (Expr *) stringToNode(constring);
1104 :
1105 : /*
1106 : * Plan the expression, since ExecInitExpr will expect that.
1107 : *
1108 : * Note: caching the result of expression_planner() is not very
1109 : * good practice. Ideally we'd use a CachedExpression here so
1110 : * that we would react promptly to, eg, changes in inlined
1111 : * functions. However, because we don't support mutable domain
1112 : * CHECK constraints, it's not really clear that it's worth the
1113 : * extra overhead to do that.
1114 : */
1115 906 : check_expr = expression_planner(check_expr);
1116 :
1117 906 : r = makeNode(DomainConstraintState);
1118 906 : r->constrainttype = DOM_CONSTRAINT_CHECK;
1119 906 : r->name = pstrdup(NameStr(c->conname));
1120 906 : r->check_expr = check_expr;
1121 906 : r->check_exprstate = NULL;
1122 :
1123 906 : MemoryContextSwitchTo(oldcxt);
1124 :
1125 : /* Accumulate constraints in an array, for sorting below */
1126 906 : if (ccons == NULL)
1127 : {
1128 874 : cconslen = 8;
1129 : ccons = (DomainConstraintState **)
1130 874 : palloc(cconslen * sizeof(DomainConstraintState *));
1131 : }
1132 32 : else if (nccons >= cconslen)
1133 : {
1134 0 : cconslen *= 2;
1135 : ccons = (DomainConstraintState **)
1136 0 : repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
1137 : }
1138 906 : ccons[nccons++] = r;
1139 : }
1140 :
1141 2072 : systable_endscan(scan);
1142 :
1143 2072 : if (nccons > 0)
1144 : {
1145 : /*
1146 : * Sort the items for this domain, so that CHECKs are applied in a
1147 : * deterministic order.
1148 : */
1149 894 : if (nccons > 1)
1150 10 : qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);
1151 :
1152 : /*
1153 : * Now attach them to the overall list. Use lcons() here because
1154 : * constraints of parent domains should be applied earlier.
1155 : */
1156 894 : oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1157 1800 : while (nccons > 0)
1158 906 : dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
1159 894 : MemoryContextSwitchTo(oldcxt);
1160 : }
1161 :
1162 : /* loop to next domain in stack */
1163 2072 : typeOid = typTup->typbasetype;
1164 2072 : ReleaseSysCache(tup);
1165 : }
1166 :
1167 2030 : table_close(conRel, AccessShareLock);
1168 :
1169 : /*
1170 : * Only need to add one NOT NULL check regardless of how many domains in
1171 : * the stack request it.
1172 : */
1173 2030 : if (notNull)
1174 : {
1175 : DomainConstraintState *r;
1176 :
1177 : /* Create the DomainConstraintCache object and context if needed */
1178 90 : if (dcc == NULL)
1179 : {
1180 : MemoryContext cxt;
1181 :
1182 70 : cxt = AllocSetContextCreate(CurrentMemoryContext,
1183 : "Domain constraints",
1184 : ALLOCSET_SMALL_SIZES);
1185 : dcc = (DomainConstraintCache *)
1186 70 : MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
1187 70 : dcc->constraints = NIL;
1188 70 : dcc->dccContext = cxt;
1189 70 : dcc->dccRefCount = 0;
1190 : }
1191 :
1192 : /* Create node trees in DomainConstraintCache's context */
1193 90 : oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1194 :
1195 90 : r = makeNode(DomainConstraintState);
1196 :
1197 90 : r->constrainttype = DOM_CONSTRAINT_NOTNULL;
1198 90 : r->name = pstrdup("NOT NULL");
1199 90 : r->check_expr = NULL;
1200 90 : r->check_exprstate = NULL;
1201 :
1202 : /* lcons to apply the nullness check FIRST */
1203 90 : dcc->constraints = lcons(r, dcc->constraints);
1204 :
1205 90 : MemoryContextSwitchTo(oldcxt);
1206 : }
1207 :
1208 : /*
1209 : * If we made a constraint object, move it into CacheMemoryContext and
1210 : * attach it to the typcache entry.
1211 : */
1212 2030 : if (dcc)
1213 : {
1214 944 : MemoryContextSetParent(dcc->dccContext, CacheMemoryContext);
1215 944 : typentry->domainData = dcc;
1216 944 : dcc->dccRefCount++; /* count the typcache's reference */
1217 : }
1218 :
1219 : /* Either way, the typcache entry's domain data is now valid. */
1220 2030 : typentry->flags |= TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
1221 2030 : }
1222 :
1223 : /*
1224 : * qsort comparator to sort DomainConstraintState pointers by name
1225 : */
1226 : static int
1227 12 : dcs_cmp(const void *a, const void *b)
1228 : {
1229 12 : const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1230 12 : const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1231 :
1232 12 : return strcmp((*ca)->name, (*cb)->name);
1233 : }
1234 :
1235 : /*
1236 : * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
1237 : * and free it if no references remain
1238 : */
1239 : static void
1240 9492 : decr_dcc_refcount(DomainConstraintCache *dcc)
1241 : {
1242 : Assert(dcc->dccRefCount > 0);
1243 9492 : if (--(dcc->dccRefCount) <= 0)
1244 488 : MemoryContextDelete(dcc->dccContext);
1245 9492 : }
1246 :
1247 : /*
1248 : * Context reset/delete callback for a DomainConstraintRef
1249 : */
1250 : static void
1251 9516 : dccref_deletion_callback(void *arg)
1252 : {
1253 9516 : DomainConstraintRef *ref = (DomainConstraintRef *) arg;
1254 9516 : DomainConstraintCache *dcc = ref->dcc;
1255 :
1256 : /* Paranoia --- be sure link is nulled before trying to release */
1257 9516 : if (dcc)
1258 : {
1259 9000 : ref->constraints = NIL;
1260 9000 : ref->dcc = NULL;
1261 9000 : decr_dcc_refcount(dcc);
1262 : }
1263 9516 : }
1264 :
1265 : /*
1266 : * prep_domain_constraints --- prepare domain constraints for execution
1267 : *
1268 : * The expression trees stored in the DomainConstraintCache's list are
1269 : * converted to executable expression state trees stored in execctx.
1270 : */
1271 : static List *
1272 2332 : prep_domain_constraints(List *constraints, MemoryContext execctx)
1273 : {
1274 2332 : List *result = NIL;
1275 : MemoryContext oldcxt;
1276 : ListCell *lc;
1277 :
1278 2332 : oldcxt = MemoryContextSwitchTo(execctx);
1279 :
1280 4688 : foreach(lc, constraints)
1281 : {
1282 2356 : DomainConstraintState *r = (DomainConstraintState *) lfirst(lc);
1283 : DomainConstraintState *newr;
1284 :
1285 2356 : newr = makeNode(DomainConstraintState);
1286 2356 : newr->constrainttype = r->constrainttype;
1287 2356 : newr->name = r->name;
1288 2356 : newr->check_expr = r->check_expr;
1289 2356 : newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1290 :
1291 2356 : result = lappend(result, newr);
1292 : }
1293 :
1294 2332 : MemoryContextSwitchTo(oldcxt);
1295 :
1296 2332 : return result;
1297 : }
1298 :
1299 : /*
1300 : * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
1301 : *
1302 : * Caller must tell us the MemoryContext in which the DomainConstraintRef
1303 : * lives. The ref will be cleaned up when that context is reset/deleted.
1304 : *
1305 : * Caller must also tell us whether it wants check_exprstate fields to be
1306 : * computed in the DomainConstraintState nodes attached to this ref.
1307 : * If it doesn't, we need not make a copy of the DomainConstraintState list.
1308 : */
void
InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref,
						MemoryContext refctx, bool need_exprstate)
{
	/* Look up the typcache entry --- we assume it survives indefinitely */
	ref->tcache = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
	ref->need_exprstate = need_exprstate;
	/* For safety, establish the callback before acquiring a refcount */
	ref->refctx = refctx;
	ref->dcc = NULL;
	ref->callback.func = dccref_deletion_callback;
	ref->callback.arg = (void *) ref;
	MemoryContextRegisterResetCallback(refctx, &ref->callback);
	/* Acquire refcount if there are constraints, and set up exported list */
	if (ref->tcache->domainData)
	{
		ref->dcc = ref->tcache->domainData;
		ref->dcc->dccRefCount++;
		if (ref->need_exprstate)
			/* build executable copies in the caller's context */
			ref->constraints = prep_domain_constraints(ref->dcc->constraints,
													   ref->refctx);
		else
			/* expose the cached list directly; caller must treat as read-only */
			ref->constraints = ref->dcc->constraints;
	}
	else
		ref->constraints = NIL;
}
1336 :
1337 : /*
1338 : * UpdateDomainConstraintRef --- recheck validity of domain constraint info
1339 : *
1340 : * If the domain's constraint set changed, ref->constraints is updated to
1341 : * point at a new list of cached constraints.
1342 : *
1343 : * In the normal case where nothing happened to the domain, this is cheap
1344 : * enough that it's reasonable (and expected) to check before *each* use
1345 : * of the constraint info.
1346 : */
void
UpdateDomainConstraintRef(DomainConstraintRef *ref)
{
	TypeCacheEntry *typentry = ref->tcache;

	/* Make sure typcache entry's data is up to date */
	if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
		typentry->typtype == TYPTYPE_DOMAIN)
		load_domaintype_info(typentry);

	/* Transfer to ref object if there's new info, adjusting refcounts */
	if (ref->dcc != typentry->domainData)
	{
		/* Paranoia --- be sure link is nulled before trying to release */
		DomainConstraintCache *dcc = ref->dcc;

		if (dcc)
		{
			/*
			 * Note: we just leak the previous list of executable domain
			 * constraints.  Alternatively, we could keep those in a child
			 * context of ref->refctx and free that context at this point.
			 * However, in practice this code path will be taken so seldom
			 * that the extra bookkeeping for a child context doesn't seem
			 * worthwhile; we'll just allow a leak for the lifespan of refctx.
			 */
			ref->constraints = NIL;
			ref->dcc = NULL;
			decr_dcc_refcount(dcc);
		}
		/* Now acquire the new cache, if any (mirrors InitDomainConstraintRef) */
		dcc = typentry->domainData;
		if (dcc)
		{
			ref->dcc = dcc;
			dcc->dccRefCount++;
			if (ref->need_exprstate)
				ref->constraints = prep_domain_constraints(dcc->constraints,
														   ref->refctx);
			else
				ref->constraints = dcc->constraints;
		}
	}
}
1390 :
1391 : /*
1392 : * DomainHasConstraints --- utility routine to check if a domain has constraints
1393 : *
1394 : * This is defined to return false, not fail, if type is not a domain.
1395 : */
1396 : bool
1397 22446 : DomainHasConstraints(Oid type_id)
1398 : {
1399 : TypeCacheEntry *typentry;
1400 :
1401 : /*
1402 : * Note: a side effect is to cause the typcache's domain data to become
1403 : * valid. This is fine since we'll likely need it soon if there is any.
1404 : */
1405 22446 : typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
1406 :
1407 22446 : return (typentry->domainData != NULL);
1408 : }
1409 :
1410 :
1411 : /*
1412 : * array_element_has_equality and friends are helper routines to check
1413 : * whether we should believe that array_eq and related functions will work
1414 : * on the given array type or composite type.
1415 : *
1416 : * The logic above may call these repeatedly on the same type entry, so we
1417 : * make use of the typentry->flags field to cache the results once known.
1418 : * Also, we assume that we'll probably want all these facts about the type
1419 : * if we want any, so we cache them all using only one lookup of the
1420 : * component datatype(s).
1421 : */
1422 :
1423 : static bool
1424 2022 : array_element_has_equality(TypeCacheEntry *typentry)
1425 : {
1426 2022 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1427 1722 : cache_array_element_properties(typentry);
1428 2022 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1429 : }
1430 :
1431 : static bool
1432 3718 : array_element_has_compare(TypeCacheEntry *typentry)
1433 : {
1434 3718 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1435 332 : cache_array_element_properties(typentry);
1436 3718 : return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1437 : }
1438 :
1439 : static bool
1440 1166 : array_element_has_hashing(TypeCacheEntry *typentry)
1441 : {
1442 1166 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1443 0 : cache_array_element_properties(typentry);
1444 1166 : return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1445 : }
1446 :
1447 : static bool
1448 228 : array_element_has_extended_hashing(TypeCacheEntry *typentry)
1449 : {
1450 228 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1451 0 : cache_array_element_properties(typentry);
1452 228 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1453 : }
1454 :
1455 : static void
1456 2054 : cache_array_element_properties(TypeCacheEntry *typentry)
1457 : {
1458 2054 : Oid elem_type = get_base_element_type(typentry->type_id);
1459 :
1460 2054 : if (OidIsValid(elem_type))
1461 : {
1462 : TypeCacheEntry *elementry;
1463 :
1464 1940 : elementry = lookup_type_cache(elem_type,
1465 : TYPECACHE_EQ_OPR |
1466 : TYPECACHE_CMP_PROC |
1467 : TYPECACHE_HASH_PROC |
1468 : TYPECACHE_HASH_EXTENDED_PROC);
1469 1940 : if (OidIsValid(elementry->eq_opr))
1470 1832 : typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1471 1940 : if (OidIsValid(elementry->cmp_proc))
1472 1714 : typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1473 1940 : if (OidIsValid(elementry->hash_proc))
1474 1820 : typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1475 1940 : if (OidIsValid(elementry->hash_extended_proc))
1476 1820 : typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1477 : }
1478 2054 : typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1479 2054 : }
1480 :
1481 : /*
1482 : * Likewise, some helper functions for composite types.
1483 : */
1484 :
1485 : static bool
1486 340 : record_fields_have_equality(TypeCacheEntry *typentry)
1487 : {
1488 340 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1489 312 : cache_record_field_properties(typentry);
1490 340 : return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1491 : }
1492 :
1493 : static bool
1494 404 : record_fields_have_compare(TypeCacheEntry *typentry)
1495 : {
1496 404 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1497 54 : cache_record_field_properties(typentry);
1498 404 : return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1499 : }
1500 :
1501 : static bool
1502 330 : record_fields_have_hashing(TypeCacheEntry *typentry)
1503 : {
1504 330 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1505 6 : cache_record_field_properties(typentry);
1506 330 : return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
1507 : }
1508 :
1509 : static bool
1510 122 : record_fields_have_extended_hashing(TypeCacheEntry *typentry)
1511 : {
1512 122 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1513 0 : cache_record_field_properties(typentry);
1514 122 : return (typentry->flags & TCFLAGS_HAVE_FIELD_EXTENDED_HASHING) != 0;
1515 : }
1516 :
static void
cache_record_field_properties(TypeCacheEntry *typentry)
{
	/*
	 * For type RECORD, we can't really tell what will work, since we don't
	 * have access here to the specific anonymous type.  Just assume that
	 * equality and comparison will (we may get a failure at runtime).  We
	 * could also claim that hashing works, but then if code that has the
	 * option between a comparison-based (sort-based) and a hash-based plan
	 * chooses hashing, stuff could fail that would otherwise work if it chose
	 * a comparison-based plan.  In practice more types support comparison
	 * than hashing.
	 */
	if (typentry->type_id == RECORDOID)
	{
		typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
							TCFLAGS_HAVE_FIELD_COMPARE);
	}
	else if (typentry->typtype == TYPTYPE_COMPOSITE)
	{
		TupleDesc	tupdesc;
		int			newflags;
		int			i;

		/* Fetch composite type's tupdesc if we don't have it already */
		if (typentry->tupDesc == NULL)
			load_typcache_tupdesc(typentry);
		tupdesc = typentry->tupDesc;

		/* Must bump the refcount while we do additional catalog lookups */
		IncrTupleDescRefCount(tupdesc);

		/* Have each property if all non-dropped fields have the property */
		newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
					TCFLAGS_HAVE_FIELD_COMPARE |
					TCFLAGS_HAVE_FIELD_HASHING |
					TCFLAGS_HAVE_FIELD_EXTENDED_HASHING);
		for (i = 0; i < tupdesc->natts; i++)
		{
			TypeCacheEntry *fieldentry;
			Form_pg_attribute attr = TupleDescAttr(tupdesc, i);

			/* Dropped columns don't count against any property */
			if (attr->attisdropped)
				continue;

			fieldentry = lookup_type_cache(attr->atttypid,
										   TYPECACHE_EQ_OPR |
										   TYPECACHE_CMP_PROC |
										   TYPECACHE_HASH_PROC |
										   TYPECACHE_HASH_EXTENDED_PROC);
			/* Knock out each property the field's type lacks */
			if (!OidIsValid(fieldentry->eq_opr))
				newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
			if (!OidIsValid(fieldentry->cmp_proc))
				newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
			if (!OidIsValid(fieldentry->hash_proc))
				newflags &= ~TCFLAGS_HAVE_FIELD_HASHING;
			if (!OidIsValid(fieldentry->hash_extended_proc))
				newflags &= ~TCFLAGS_HAVE_FIELD_EXTENDED_HASHING;

			/* We can drop out of the loop once we disprove all bits */
			if (newflags == 0)
				break;
		}
		typentry->flags |= newflags;

		DecrTupleDescRefCount(tupdesc);
	}
	else if (typentry->typtype == TYPTYPE_DOMAIN)
	{
		/* If it's domain over composite, copy base type's properties */
		TypeCacheEntry *baseentry;

		/* load up basetype info if we didn't already */
		if (typentry->domainBaseType == InvalidOid)
		{
			typentry->domainBaseTypmod = -1;
			typentry->domainBaseType =
				getBaseTypeAndTypmod(typentry->type_id,
									 &typentry->domainBaseTypmod);
		}
		baseentry = lookup_type_cache(typentry->domainBaseType,
									  TYPECACHE_EQ_OPR |
									  TYPECACHE_CMP_PROC |
									  TYPECACHE_HASH_PROC |
									  TYPECACHE_HASH_EXTENDED_PROC);
		if (baseentry->typtype == TYPTYPE_COMPOSITE)
		{
			typentry->flags |= TCFLAGS_DOMAIN_BASE_IS_COMPOSITE;
			typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
												   TCFLAGS_HAVE_FIELD_COMPARE |
												   TCFLAGS_HAVE_FIELD_HASHING |
												   TCFLAGS_HAVE_FIELD_EXTENDED_HASHING);
		}
	}
	/* In all cases, mark the field-property check as done */
	typentry->flags |= TCFLAGS_CHECKED_FIELD_PROPERTIES;
}
1613 :
1614 : /*
1615 : * Likewise, some helper functions for range and multirange types.
1616 : *
1617 : * We can borrow the flag bits for array element properties to use for range
1618 : * element properties, since those flag bits otherwise have no use in a
1619 : * range or multirange type's typcache entry.
1620 : */
1621 :
1622 : static bool
1623 20 : range_element_has_hashing(TypeCacheEntry *typentry)
1624 : {
1625 20 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1626 20 : cache_range_element_properties(typentry);
1627 20 : return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1628 : }
1629 :
1630 : static bool
1631 0 : range_element_has_extended_hashing(TypeCacheEntry *typentry)
1632 : {
1633 0 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1634 0 : cache_range_element_properties(typentry);
1635 0 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1636 : }
1637 :
1638 : static void
1639 20 : cache_range_element_properties(TypeCacheEntry *typentry)
1640 : {
1641 : /* load up subtype link if we didn't already */
1642 20 : if (typentry->rngelemtype == NULL &&
1643 0 : typentry->typtype == TYPTYPE_RANGE)
1644 0 : load_rangetype_info(typentry);
1645 :
1646 20 : if (typentry->rngelemtype != NULL)
1647 : {
1648 : TypeCacheEntry *elementry;
1649 :
1650 : /* might need to calculate subtype's hash function properties */
1651 20 : elementry = lookup_type_cache(typentry->rngelemtype->type_id,
1652 : TYPECACHE_HASH_PROC |
1653 : TYPECACHE_HASH_EXTENDED_PROC);
1654 20 : if (OidIsValid(elementry->hash_proc))
1655 14 : typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1656 20 : if (OidIsValid(elementry->hash_extended_proc))
1657 14 : typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1658 : }
1659 20 : typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1660 20 : }
1661 :
1662 : static bool
1663 12 : multirange_element_has_hashing(TypeCacheEntry *typentry)
1664 : {
1665 12 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1666 12 : cache_multirange_element_properties(typentry);
1667 12 : return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1668 : }
1669 :
1670 : static bool
1671 0 : multirange_element_has_extended_hashing(TypeCacheEntry *typentry)
1672 : {
1673 0 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1674 0 : cache_multirange_element_properties(typentry);
1675 0 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1676 : }
1677 :
1678 : static void
1679 12 : cache_multirange_element_properties(TypeCacheEntry *typentry)
1680 : {
1681 : /* load up range link if we didn't already */
1682 12 : if (typentry->rngtype == NULL &&
1683 0 : typentry->typtype == TYPTYPE_MULTIRANGE)
1684 0 : load_multirangetype_info(typentry);
1685 :
1686 12 : if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
1687 : {
1688 : TypeCacheEntry *elementry;
1689 :
1690 : /* might need to calculate subtype's hash function properties */
1691 12 : elementry = lookup_type_cache(typentry->rngtype->rngelemtype->type_id,
1692 : TYPECACHE_HASH_PROC |
1693 : TYPECACHE_HASH_EXTENDED_PROC);
1694 12 : if (OidIsValid(elementry->hash_proc))
1695 6 : typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1696 12 : if (OidIsValid(elementry->hash_extended_proc))
1697 6 : typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1698 : }
1699 12 : typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1700 12 : }
1701 :
1702 : /*
1703 : * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
1704 : * to store 'typmod'.
1705 : */
1706 : static void
1707 11716 : ensure_record_cache_typmod_slot_exists(int32 typmod)
1708 : {
1709 11716 : if (RecordCacheArray == NULL)
1710 : {
1711 4962 : RecordCacheArray = (RecordCacheArrayEntry *)
1712 4962 : MemoryContextAllocZero(CacheMemoryContext,
1713 : 64 * sizeof(RecordCacheArrayEntry));
1714 4962 : RecordCacheArrayLen = 64;
1715 : }
1716 :
1717 11716 : if (typmod >= RecordCacheArrayLen)
1718 : {
1719 0 : int32 newlen = pg_nextpower2_32(typmod + 1);
1720 :
1721 0 : RecordCacheArray = repalloc0_array(RecordCacheArray,
1722 : RecordCacheArrayEntry,
1723 : RecordCacheArrayLen,
1724 : newlen);
1725 0 : RecordCacheArrayLen = newlen;
1726 : }
1727 11716 : }
1728 :
/*
 * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
 *
 * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
 * hasn't had its refcount bumped.
 */
static TupleDesc
lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
{
    if (type_id != RECORDOID)
    {
        /*
         * It's a named composite type, so use the regular typcache.
         */
        TypeCacheEntry *typentry;

        typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
        if (typentry->tupDesc == NULL && !noError)
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("type %s is not composite",
                            format_type_be(type_id))));
        return typentry->tupDesc;
    }
    else
    {
        /*
         * It's a transient record type, so look in our record-type table.
         */
        if (typmod >= 0)
        {
            /* Is it already in our local cache? */
            if (typmod < RecordCacheArrayLen &&
                RecordCacheArray[typmod].tupdesc != NULL)
                return RecordCacheArray[typmod].tupdesc;

            /* Are we attached to a shared record typmod registry? */
            if (CurrentSession->shared_typmod_registry != NULL)
            {
                SharedTypmodTableEntry *entry;

                /*
                 * Try to find it in the shared typmod index.  On success,
                 * dshash_find returns with the partition lock held; we must
                 * release it via dshash_release_lock before returning.
                 */
                entry = dshash_find(CurrentSession->shared_typmod_table,
                                    &typmod, false);
                if (entry != NULL)
                {
                    TupleDesc   tupdesc;

                    tupdesc = (TupleDesc)
                        dsa_get_address(CurrentSession->area,
                                        entry->shared_tupdesc);
                    Assert(typmod == tupdesc->tdtypmod);

                    /* We may need to extend the local RecordCacheArray. */
                    ensure_record_cache_typmod_slot_exists(typmod);

                    /*
                     * Our local array can now point directly to the TupleDesc
                     * in shared memory, which is non-reference-counted.
                     */
                    RecordCacheArray[typmod].tupdesc = tupdesc;
                    Assert(tupdesc->tdrefcount == -1);

                    /*
                     * We don't share tupdesc identifiers across processes, so
                     * assign one locally.
                     */
                    RecordCacheArray[typmod].id = ++tupledesc_id_counter;

                    dshash_release_lock(CurrentSession->shared_typmod_table,
                                        entry);

                    return RecordCacheArray[typmod].tupdesc;
                }
            }
        }

        /* Negative typmod, or typmod not found anywhere: fail or return NULL */
        if (!noError)
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("record type has not been registered")));
        return NULL;
    }
}
1813 :
1814 : /*
1815 : * lookup_rowtype_tupdesc
1816 : *
1817 : * Given a typeid/typmod that should describe a known composite type,
1818 : * return the tuple descriptor for the type. Will ereport on failure.
1819 : * (Use ereport because this is reachable with user-specified OIDs,
1820 : * for example from record_in().)
1821 : *
1822 : * Note: on success, we increment the refcount of the returned TupleDesc,
1823 : * and log the reference in CurrentResourceOwner. Caller must call
1824 : * ReleaseTupleDesc when done using the tupdesc. (There are some
1825 : * cases in which the returned tupdesc is not refcounted, in which
1826 : * case PinTupleDesc/ReleaseTupleDesc are no-ops; but in these cases
1827 : * the tupdesc is guaranteed to live till process exit.)
1828 : */
1829 : TupleDesc
1830 70726 : lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
1831 : {
1832 : TupleDesc tupDesc;
1833 :
1834 70726 : tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1835 70726 : PinTupleDesc(tupDesc);
1836 70726 : return tupDesc;
1837 : }
1838 :
1839 : /*
1840 : * lookup_rowtype_tupdesc_noerror
1841 : *
1842 : * As above, but if the type is not a known composite type and noError
1843 : * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1844 : * type_id is passed, you'll get an ereport anyway.)
1845 : */
1846 : TupleDesc
1847 20 : lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1848 : {
1849 : TupleDesc tupDesc;
1850 :
1851 20 : tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1852 20 : if (tupDesc != NULL)
1853 20 : PinTupleDesc(tupDesc);
1854 20 : return tupDesc;
1855 : }
1856 :
1857 : /*
1858 : * lookup_rowtype_tupdesc_copy
1859 : *
1860 : * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1861 : * copied into the CurrentMemoryContext and is not reference-counted.
1862 : */
1863 : TupleDesc
1864 45002 : lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
1865 : {
1866 : TupleDesc tmp;
1867 :
1868 45002 : tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1869 45002 : return CreateTupleDescCopyConstr(tmp);
1870 : }
1871 :
/*
 * lookup_rowtype_tupdesc_domain
 *
 * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
 * a domain over a named composite type; so this is effectively equivalent to
 * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
 * except for being a tad faster.
 *
 * Note: the reason we don't fold the look-through-domain behavior into plain
 * lookup_rowtype_tupdesc() is that we want callers to know they might be
 * dealing with a domain.  Otherwise they might construct a tuple that should
 * be of the domain type, but not apply domain constraints.
 */
TupleDesc
lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
{
    TupleDesc   tupDesc;

    if (type_id != RECORDOID)
    {
        /*
         * Check for domain or named composite type.  We might as well load
         * whichever data is needed.
         */
        TypeCacheEntry *typentry;

        typentry = lookup_type_cache(type_id,
                                     TYPECACHE_TUPDESC |
                                     TYPECACHE_DOMAIN_BASE_INFO);
        if (typentry->typtype == TYPTYPE_DOMAIN)
        {
            /*
             * It's a domain: recurse (once) on the base type.  The noerror
             * variant handles the pinning for us.
             */
            return lookup_rowtype_tupdesc_noerror(typentry->domainBaseType,
                                                  typentry->domainBaseTypmod,
                                                  noError);
        }
        if (typentry->tupDesc == NULL && !noError)
            ereport(ERROR,
                    (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                     errmsg("type %s is not composite",
                            format_type_be(type_id))));
        tupDesc = typentry->tupDesc;
    }
    else
        tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
    /* Pin only if we found a descriptor (noError may yield NULL). */
    if (tupDesc != NULL)
        PinTupleDesc(tupDesc);
    return tupDesc;
}
1918 :
1919 : /*
1920 : * Hash function for the hash table of RecordCacheEntry.
1921 : */
1922 : static uint32
1923 292708 : record_type_typmod_hash(const void *data, size_t size)
1924 : {
1925 292708 : RecordCacheEntry *entry = (RecordCacheEntry *) data;
1926 :
1927 292708 : return hashTupleDesc(entry->tupdesc);
1928 : }
1929 :
1930 : /*
1931 : * Match function for the hash table of RecordCacheEntry.
1932 : */
1933 : static int
1934 272260 : record_type_typmod_compare(const void *a, const void *b, size_t size)
1935 : {
1936 272260 : RecordCacheEntry *left = (RecordCacheEntry *) a;
1937 272260 : RecordCacheEntry *right = (RecordCacheEntry *) b;
1938 :
1939 272260 : return equalTupleDescs(left->tupdesc, right->tupdesc) ? 0 : 1;
1940 : }
1941 :
/*
 * assign_record_type_typmod
 *
 * Given a tuple descriptor for a RECORD type, find or create a cache entry
 * for the type, and set the tupdesc's tdtypmod field to a value that will
 * identify this cache entry to lookup_rowtype_tupdesc.
 */
void
assign_record_type_typmod(TupleDesc tupDesc)
{
    RecordCacheEntry *recentry;
    TupleDesc   entDesc;
    bool        found;
    MemoryContext oldcxt;

    Assert(tupDesc->tdtypeid == RECORDOID);

    if (RecordCacheHash == NULL)
    {
        /* First time through: initialize the hash table */
        HASHCTL     ctl;

        ctl.keysize = sizeof(TupleDesc);    /* just the pointer */
        ctl.entrysize = sizeof(RecordCacheEntry);
        ctl.hash = record_type_typmod_hash;
        ctl.match = record_type_typmod_compare;
        RecordCacheHash = hash_create("Record information cache", 64,
                                      &ctl,
                                      HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);

        /* Also make sure CacheMemoryContext exists */
        if (!CacheMemoryContext)
            CreateCacheMemoryContext();
    }

    /*
     * Find a hashtable entry for this tuple descriptor.  We don't use
     * HASH_ENTER yet, because if it's missing, we need to make sure that all
     * the allocations succeed before we create the new entry.
     */
    recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
                                                &tupDesc,
                                                HASH_FIND, &found);
    if (found && recentry->tupdesc != NULL)
    {
        /* Already known: just copy the assigned typmod to the caller. */
        tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
        return;
    }

    /* Not present, so need to manufacture an entry */
    oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

    /* Look in the SharedRecordTypmodRegistry, if attached */
    entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
    if (entDesc == NULL)
    {
        /*
         * Make sure we have room before we CreateTupleDescCopy() or advance
         * NextRecordTypmod.
         */
        ensure_record_cache_typmod_slot_exists(NextRecordTypmod);

        /* Reference-counted local cache only. */
        entDesc = CreateTupleDescCopy(tupDesc);
        entDesc->tdrefcount = 1;
        entDesc->tdtypmod = NextRecordTypmod++;
    }
    else
    {
        /* Shared registry supplied the typmod; just make room locally. */
        ensure_record_cache_typmod_slot_exists(entDesc->tdtypmod);
    }

    RecordCacheArray[entDesc->tdtypmod].tupdesc = entDesc;

    /* Assign a unique tupdesc identifier, too. */
    RecordCacheArray[entDesc->tdtypmod].id = ++tupledesc_id_counter;

    /*
     * Fully initialized; create the hash table entry.  Doing this last means
     * an error during the steps above cannot leave a half-built entry behind.
     */
    recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
                                                &tupDesc,
                                                HASH_ENTER, NULL);
    recentry->tupdesc = entDesc;

    /* Update the caller's tuple descriptor. */
    tupDesc->tdtypmod = entDesc->tdtypmod;

    MemoryContextSwitchTo(oldcxt);
}
2030 :
2031 : /*
2032 : * assign_record_type_identifier
2033 : *
2034 : * Get an identifier, which will be unique over the lifespan of this backend
2035 : * process, for the current tuple descriptor of the specified composite type.
2036 : * For named composite types, the value is guaranteed to change if the type's
2037 : * definition does. For registered RECORD types, the value will not change
2038 : * once assigned, since the registered type won't either. If an anonymous
2039 : * RECORD type is specified, we return a new identifier on each call.
2040 : */
2041 : uint64
2042 5344 : assign_record_type_identifier(Oid type_id, int32 typmod)
2043 : {
2044 5344 : if (type_id != RECORDOID)
2045 : {
2046 : /*
2047 : * It's a named composite type, so use the regular typcache.
2048 : */
2049 : TypeCacheEntry *typentry;
2050 :
2051 0 : typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
2052 0 : if (typentry->tupDesc == NULL)
2053 0 : ereport(ERROR,
2054 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2055 : errmsg("type %s is not composite",
2056 : format_type_be(type_id))));
2057 : Assert(typentry->tupDesc_identifier != 0);
2058 0 : return typentry->tupDesc_identifier;
2059 : }
2060 : else
2061 : {
2062 : /*
2063 : * It's a transient record type, so look in our record-type table.
2064 : */
2065 5344 : if (typmod >= 0 && typmod < RecordCacheArrayLen &&
2066 56 : RecordCacheArray[typmod].tupdesc != NULL)
2067 : {
2068 : Assert(RecordCacheArray[typmod].id != 0);
2069 56 : return RecordCacheArray[typmod].id;
2070 : }
2071 :
2072 : /* For anonymous or unrecognized record type, generate a new ID */
2073 5288 : return ++tupledesc_id_counter;
2074 : }
2075 : }
2076 :
/*
 * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
 * This exists only to avoid exposing private innards of
 * SharedRecordTypmodRegistry in a header.
 */
size_t
SharedRecordTypmodRegistryEstimate(void)
{
    /* The registry is a fixed-size struct; no variable-length parts. */
    return sizeof(SharedRecordTypmodRegistry);
}
2087 :
/*
 * Initialize 'registry' in a pre-existing shared memory region, which must be
 * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
 * bytes.
 *
 * 'area' will be used to allocate shared memory space as required for the
 * typemod registration.  The current process, expected to be a leader process
 * in a parallel query, will be attached automatically and its current record
 * types will be loaded into *registry.  While attached, all calls to
 * assign_record_type_typmod will use the shared registry.  Worker backends
 * will need to attach explicitly.
 *
 * Note that this function takes 'area' and 'segment' as arguments rather than
 * accessing them via CurrentSession, because they aren't installed there
 * until after this function runs.
 */
void
SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry,
                               dsm_segment *segment,
                               dsa_area *area)
{
    MemoryContext old_context;
    dshash_table *record_table;
    dshash_table *typmod_table;
    int32       typmod;

    Assert(!IsParallelWorker());

    /* We can't already be attached to a shared registry. */
    Assert(CurrentSession->shared_typmod_registry == NULL);
    Assert(CurrentSession->shared_record_table == NULL);
    Assert(CurrentSession->shared_typmod_table == NULL);

    /* Local dshash handles must live beyond the current transaction. */
    old_context = MemoryContextSwitchTo(TopMemoryContext);

    /* Create the hash table of tuple descriptors indexed by themselves. */
    record_table = dshash_create(area, &srtr_record_table_params, area);

    /* Create the hash table of tuple descriptors indexed by typmod. */
    typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);

    MemoryContextSwitchTo(old_context);

    /* Initialize the SharedRecordTypmodRegistry. */
    registry->record_table_handle = dshash_get_hash_table_handle(record_table);
    registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
    pg_atomic_init_u32(&registry->next_typmod, NextRecordTypmod);

    /*
     * Copy all entries from this backend's private registry into the shared
     * registry.
     */
    for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
    {
        SharedTypmodTableEntry *typmod_table_entry;
        SharedRecordTableEntry *record_table_entry;
        SharedRecordTableKey record_table_key;
        dsa_pointer shared_dp;
        TupleDesc   tupdesc;
        bool        found;

        tupdesc = RecordCacheArray[typmod].tupdesc;
        if (tupdesc == NULL)
            continue;

        /* Copy the TupleDesc into shared memory. */
        shared_dp = share_tupledesc(area, tupdesc, typmod);

        /*
         * Insert into the typmod table.  dshash_find_or_insert returns with
         * the partition lock held; release it as soon as the entry is filled.
         */
        typmod_table_entry = dshash_find_or_insert(typmod_table,
                                                   &tupdesc->tdtypmod,
                                                   &found);
        if (found)
            elog(ERROR, "cannot create duplicate shared record typmod");
        typmod_table_entry->typmod = tupdesc->tdtypmod;
        typmod_table_entry->shared_tupdesc = shared_dp;
        dshash_release_lock(typmod_table, typmod_table_entry);

        /* Insert into the record table. */
        record_table_key.shared = false;
        record_table_key.u.local_tupdesc = tupdesc;
        record_table_entry = dshash_find_or_insert(record_table,
                                                   &record_table_key,
                                                   &found);
        if (!found)
        {
            record_table_entry->key.shared = true;
            record_table_entry->key.u.shared_tupdesc = shared_dp;
        }
        dshash_release_lock(record_table, record_table_entry);
    }

    /*
     * Set up the global state that will tell assign_record_type_typmod and
     * lookup_rowtype_tupdesc_internal about the shared registry.
     */
    CurrentSession->shared_record_table = record_table;
    CurrentSession->shared_typmod_table = typmod_table;
    CurrentSession->shared_typmod_registry = registry;

    /*
     * We install a detach hook in the leader, but only to handle cleanup on
     * failure during GetSessionDsmHandle().  Once GetSessionDsmHandle() pins
     * the memory, the leader process will use a shared registry until it
     * exits.
     */
    on_dsm_detach(segment, shared_record_typmod_registry_detach, (Datum) 0);
}
2196 :
/*
 * Attach to 'registry', which must have been initialized already by another
 * backend.  Future calls to assign_record_type_typmod and
 * lookup_rowtype_tupdesc_internal will use the shared registry until the
 * current session is detached.
 */
void
SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
{
    MemoryContext old_context;
    dshash_table *record_table;
    dshash_table *typmod_table;

    Assert(IsParallelWorker());

    /* We can't already be attached to a shared registry. */
    Assert(CurrentSession != NULL);
    Assert(CurrentSession->segment != NULL);
    Assert(CurrentSession->area != NULL);
    Assert(CurrentSession->shared_typmod_registry == NULL);
    Assert(CurrentSession->shared_record_table == NULL);
    Assert(CurrentSession->shared_typmod_table == NULL);

    /*
     * We can't already have typmods in our local cache, because they'd clash
     * with those imported by SharedRecordTypmodRegistryInit.  This should be
     * a freshly started parallel worker.  If we ever support worker
     * recycling, a worker would need to zap its local cache in between
     * servicing different queries, in order to be able to call this and
     * synchronize typmods with a new leader; but that's problematic because
     * we can't be very sure that record-typmod-related state hasn't escaped
     * to anywhere else in the process.
     */
    Assert(NextRecordTypmod == 0);

    /* Local dshash handles must live beyond the current transaction. */
    old_context = MemoryContextSwitchTo(TopMemoryContext);

    /* Attach to the two hash tables. */
    record_table = dshash_attach(CurrentSession->area,
                                 &srtr_record_table_params,
                                 registry->record_table_handle,
                                 CurrentSession->area);
    typmod_table = dshash_attach(CurrentSession->area,
                                 &srtr_typmod_table_params,
                                 registry->typmod_table_handle,
                                 NULL);

    MemoryContextSwitchTo(old_context);

    /*
     * Set up detach hook to run at worker exit.  Currently this is the same
     * as the leader's detach hook, but in future they might need to be
     * different.
     */
    on_dsm_detach(CurrentSession->segment,
                  shared_record_typmod_registry_detach,
                  PointerGetDatum(registry));

    /*
     * Set up the session state that will tell assign_record_type_typmod and
     * lookup_rowtype_tupdesc_internal about the shared registry.
     */
    CurrentSession->shared_typmod_registry = registry;
    CurrentSession->shared_record_table = record_table;
    CurrentSession->shared_typmod_table = typmod_table;
}
2263 :
/*
 * TypeCacheRelCallback
 *		Relcache inval callback function
 *
 * Delete the cached tuple descriptor (if any) for the given rel's composite
 * type, or for all composite types if relid == InvalidOid.  Also reset
 * whatever info we have cached about the composite type's comparability.
 *
 * This is called when a relcache invalidation event occurs for the given
 * relid.  We must scan the whole typcache hash since we don't know the
 * type OID corresponding to the relid.  We could do a direct search if this
 * were a syscache-flush callback on pg_type, but then we would need all
 * ALTER-TABLE-like commands that could modify a rowtype to issue syscache
 * invals against the rel's pg_type OID.  The extra SI signaling could very
 * well cost more than we'd save, since in most usages there are not very
 * many entries in a backend's typcache.  The risk of bugs-of-omission seems
 * high, too.
 *
 * Another possibility, with only localized impact, is to maintain a second
 * hashtable that indexes composite-type typcache entries by their typrelid.
 * But it's still not clear it's worth the trouble.
 */
static void
TypeCacheRelCallback(Datum arg, Oid relid)
{
    HASH_SEQ_STATUS status;
    TypeCacheEntry *typentry;

    /* TypeCacheHash must exist, else this callback wouldn't be registered */
    hash_seq_init(&status, TypeCacheHash);
    while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
    {
        if (typentry->typtype == TYPTYPE_COMPOSITE)
        {
            /* Skip if no match, unless we're zapping all composite types */
            if (relid != typentry->typrelid && relid != InvalidOid)
                continue;

            /* Delete tupdesc if we have it */
            if (typentry->tupDesc != NULL)
            {
                /*
                 * Release our refcount, and free the tupdesc if none remain.
                 * (Can't use DecrTupleDescRefCount because this reference is
                 * not logged in current resource owner.)
                 */
                Assert(typentry->tupDesc->tdrefcount > 0);
                if (--typentry->tupDesc->tdrefcount == 0)
                    FreeTupleDesc(typentry->tupDesc);
                typentry->tupDesc = NULL;

                /*
                 * Also clear tupDesc_identifier, so that anything watching
                 * that will realize that the tupdesc has possibly changed.
                 * (Alternatively, we could specify that to detect possible
                 * tupdesc change, one must check for tupDesc != NULL as well
                 * as tupDesc_identifier being the same as what was previously
                 * seen.  That seems error-prone.)
                 */
                typentry->tupDesc_identifier = 0;
            }

            /* Reset equality/comparison/hashing validity information */
            typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
        }
        else if (typentry->typtype == TYPTYPE_DOMAIN)
        {
            /*
             * If it's domain over composite, reset flags.  (We don't bother
             * trying to determine whether the specific base type needs a
             * reset.)  Note that if we haven't determined whether the base
             * type is composite, we don't need to reset anything.
             */
            if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
                typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
        }
    }
}
2342 :
2343 : /*
2344 : * TypeCacheTypCallback
2345 : * Syscache inval callback function
2346 : *
2347 : * This is called when a syscache invalidation event occurs for any
2348 : * pg_type row. If we have information cached about that type, mark
2349 : * it as needing to be reloaded.
2350 : */
2351 : static void
2352 503868 : TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
2353 : {
2354 : HASH_SEQ_STATUS status;
2355 : TypeCacheEntry *typentry;
2356 :
2357 : /* TypeCacheHash must exist, else this callback wouldn't be registered */
2358 503868 : hash_seq_init(&status, TypeCacheHash);
2359 5436980 : while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2360 : {
2361 : /* Is this the targeted type row (or it's a total cache flush)? */
2362 4933112 : if (hashvalue == 0 || typentry->type_id_hash == hashvalue)
2363 : {
2364 : /*
2365 : * Mark the data obtained directly from pg_type as invalid. Also,
2366 : * if it's a domain, typnotnull might've changed, so we'll need to
2367 : * recalculate its constraints.
2368 : */
2369 5706 : typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
2370 : TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS);
2371 : }
2372 : }
2373 503868 : }
2374 :
2375 : /*
2376 : * TypeCacheOpcCallback
2377 : * Syscache inval callback function
2378 : *
2379 : * This is called when a syscache invalidation event occurs for any pg_opclass
2380 : * row. In principle we could probably just invalidate data dependent on the
2381 : * particular opclass, but since updates on pg_opclass are rare in production
2382 : * it doesn't seem worth a lot of complication: we just mark all cached data
2383 : * invalid.
2384 : *
2385 : * Note that we don't bother watching for updates on pg_amop or pg_amproc.
2386 : * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
2387 : * is not allowed to be used to add/drop the primary operators and functions
2388 : * of an opclass, only cross-type members of a family; and the latter sorts
2389 : * of members are not going to get cached here.
2390 : */
2391 : static void
2392 1264 : TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
2393 : {
2394 : HASH_SEQ_STATUS status;
2395 : TypeCacheEntry *typentry;
2396 :
2397 : /* TypeCacheHash must exist, else this callback wouldn't be registered */
2398 1264 : hash_seq_init(&status, TypeCacheHash);
2399 7914 : while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2400 : {
2401 : /* Reset equality/comparison/hashing validity information */
2402 6650 : typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2403 : }
2404 1264 : }
2405 :
2406 : /*
2407 : * TypeCacheConstrCallback
2408 : * Syscache inval callback function
2409 : *
2410 : * This is called when a syscache invalidation event occurs for any
2411 : * pg_constraint row. We flush information about domain constraints
2412 : * when this happens.
2413 : *
2414 : * It's slightly annoying that we can't tell whether the inval event was for
2415 : * a domain constraint record or not; there's usually more update traffic
2416 : * for table constraints than domain constraints, so we'll do a lot of
2417 : * useless flushes. Still, this is better than the old no-caching-at-all
2418 : * approach to domain constraints.
2419 : */
2420 : static void
2421 111684 : TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
2422 : {
2423 : TypeCacheEntry *typentry;
2424 :
2425 : /*
2426 : * Because this is called very frequently, and typically very few of the
2427 : * typcache entries are for domains, we don't use hash_seq_search here.
2428 : * Instead we thread all the domain-type entries together so that we can
2429 : * visit them cheaply.
2430 : */
2431 201330 : for (typentry = firstDomainTypeEntry;
2432 : typentry != NULL;
2433 89646 : typentry = typentry->nextDomain)
2434 : {
2435 : /* Reset domain constraint validity information */
2436 89646 : typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
2437 : }
2438 111684 : }
2439 :
2440 :
2441 : /*
2442 : * Check if given OID is part of the subset that's sortable by comparisons
2443 : */
2444 : static inline bool
2445 300114 : enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
2446 : {
2447 : Oid offset;
2448 :
2449 300114 : if (arg < enumdata->bitmap_base)
2450 0 : return false;
2451 300114 : offset = arg - enumdata->bitmap_base;
2452 300114 : if (offset > (Oid) INT_MAX)
2453 0 : return false;
2454 300114 : return bms_is_member((int) offset, enumdata->sorted_values);
2455 : }
2456 :
2457 :
/*
 * compare_values_of_enum
 *		Compare two members of an enum type.
 *		Return <0, 0, or >0 according as arg1 <, =, or > arg2.
 *
 * Note: currently, the enumData cache is refreshed only if we are asked
 * to compare an enum value that is not already in the cache.  This is okay
 * because there is no support for re-ordering existing values, so comparisons
 * of previously cached values will return the right answer even if other
 * values have been added since we last loaded the cache.
 *
 * Note: the enum logic has a special-case rule about even-numbered versus
 * odd-numbered OIDs, but we take no account of that rule here; this
 * routine shouldn't even get called when that rule applies.
 */
int
compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
{
    TypeCacheEnumData *enumdata;
    EnumItem   *item1;
    EnumItem   *item2;

    /*
     * Equal OIDs are certainly equal --- this case was probably handled by
     * our caller, but we may as well check.
     */
    if (arg1 == arg2)
        return 0;

    /* Load up the cache if first time through */
    if (tcache->enumData == NULL)
        load_enum_cache_data(tcache);
    enumdata = tcache->enumData;

    /*
     * If both OIDs are known-sorted, we can just compare them directly.
     */
    if (enum_known_sorted(enumdata, arg1) &&
        enum_known_sorted(enumdata, arg2))
    {
        if (arg1 < arg2)
            return -1;
        else
            return 1;
    }

    /*
     * Slow path: we have to identify their actual sort-order positions.
     */
    item1 = find_enumitem(enumdata, arg1);
    item2 = find_enumitem(enumdata, arg2);

    if (item1 == NULL || item2 == NULL)
    {
        /*
         * We couldn't find one or both values.  That means the enum has
         * changed under us, so re-initialize the cache and try again.  We
         * don't bother retrying the known-sorted case in this path.
         */
        load_enum_cache_data(tcache);
        enumdata = tcache->enumData;

        item1 = find_enumitem(enumdata, arg1);
        item2 = find_enumitem(enumdata, arg2);

        /*
         * If we still can't find the values, complain: we must have corrupt
         * data.
         */
        if (item1 == NULL)
            elog(ERROR, "enum value %u not found in cache for enum %s",
                 arg1, format_type_be(tcache->type_id));
        if (item2 == NULL)
            elog(ERROR, "enum value %u not found in cache for enum %s",
                 arg2, format_type_be(tcache->type_id));
    }

    /* Compare by cached sort order, not by OID. */
    if (item1->sort_order < item2->sort_order)
        return -1;
    else if (item1->sort_order > item2->sort_order)
        return 1;
    else
        return 0;
}
2542 :
2543 : /*
2544 : * Load (or re-load) the enumData member of the typcache entry.
2545 : */
2546 : static void
2547 8 : load_enum_cache_data(TypeCacheEntry *tcache)
2548 : {
2549 : TypeCacheEnumData *enumdata;
2550 : Relation enum_rel;
2551 : SysScanDesc enum_scan;
2552 : HeapTuple enum_tuple;
2553 : ScanKeyData skey;
2554 : EnumItem *items;
2555 : int numitems;
2556 : int maxitems;
2557 : Oid bitmap_base;
2558 : Bitmapset *bitmap;
2559 : MemoryContext oldcxt;
2560 : int bm_size,
2561 : start_pos;
2562 :
2563 : /* Check that this is actually an enum */
2564 8 : if (tcache->typtype != TYPTYPE_ENUM)
2565 0 : ereport(ERROR,
2566 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2567 : errmsg("%s is not an enum",
2568 : format_type_be(tcache->type_id))));
2569 :
2570 : /*
2571 : * Read all the information for members of the enum type. We collect the
2572 : * info in working memory in the caller's context, and then transfer it to
2573 : * permanent memory in CacheMemoryContext. This minimizes the risk of
2574 : * leaking memory from CacheMemoryContext in the event of an error partway
2575 : * through.
2576 : */
2577 8 : maxitems = 64;
2578 8 : items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
2579 8 : numitems = 0;
2580 :
2581 : /* Scan pg_enum for the members of the target enum type. */
2582 8 : ScanKeyInit(&skey,
2583 : Anum_pg_enum_enumtypid,
2584 : BTEqualStrategyNumber, F_OIDEQ,
2585 : ObjectIdGetDatum(tcache->type_id));
2586 :
2587 8 : enum_rel = table_open(EnumRelationId, AccessShareLock);
2588 8 : enum_scan = systable_beginscan(enum_rel,
2589 : EnumTypIdLabelIndexId,
2590 : true, NULL,
2591 : 1, &skey);
2592 :
2593 64 : while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
2594 : {
2595 56 : Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);
2596 :
2597 56 : if (numitems >= maxitems)
2598 : {
2599 0 : maxitems *= 2;
2600 0 : items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
2601 : }
2602 56 : items[numitems].enum_oid = en->oid;
2603 56 : items[numitems].sort_order = en->enumsortorder;
2604 56 : numitems++;
2605 : }
2606 :
2607 8 : systable_endscan(enum_scan);
2608 8 : table_close(enum_rel, AccessShareLock);
2609 :
2610 : /* Sort the items into OID order */
2611 8 : qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
2612 :
2613 : /*
2614 : * Here, we create a bitmap listing a subset of the enum's OIDs that are
2615 : * known to be in order and can thus be compared with just OID comparison.
2616 : *
2617 : * The point of this is that the enum's initial OIDs were certainly in
2618 : * order, so there is some subset that can be compared via OID comparison;
2619 : * and we'd rather not do binary searches unnecessarily.
2620 : *
2621 : * This is somewhat heuristic, and might identify a subset of OIDs that
2622 : * isn't exactly what the type started with. That's okay as long as the
2623 : * subset is correctly sorted.
2624 : */
2625 8 : bitmap_base = InvalidOid;
2626 8 : bitmap = NULL;
2627 8 : bm_size = 1; /* only save sets of at least 2 OIDs */
2628 :
2629 20 : for (start_pos = 0; start_pos < numitems - 1; start_pos++)
2630 : {
2631 : /*
2632 : * Identify longest sorted subsequence starting at start_pos
2633 : */
2634 20 : Bitmapset *this_bitmap = bms_make_singleton(0);
2635 20 : int this_bm_size = 1;
2636 20 : Oid start_oid = items[start_pos].enum_oid;
2637 20 : float4 prev_order = items[start_pos].sort_order;
2638 : int i;
2639 :
2640 134 : for (i = start_pos + 1; i < numitems; i++)
2641 : {
2642 : Oid offset;
2643 :
2644 114 : offset = items[i].enum_oid - start_oid;
2645 : /* quit if bitmap would be too large; cutoff is arbitrary */
2646 114 : if (offset >= 8192)
2647 0 : break;
2648 : /* include the item if it's in-order */
2649 114 : if (items[i].sort_order > prev_order)
2650 : {
2651 58 : prev_order = items[i].sort_order;
2652 58 : this_bitmap = bms_add_member(this_bitmap, (int) offset);
2653 58 : this_bm_size++;
2654 : }
2655 : }
2656 :
2657 : /* Remember it if larger than previous best */
2658 20 : if (this_bm_size > bm_size)
2659 : {
2660 8 : bms_free(bitmap);
2661 8 : bitmap_base = start_oid;
2662 8 : bitmap = this_bitmap;
2663 8 : bm_size = this_bm_size;
2664 : }
2665 : else
2666 12 : bms_free(this_bitmap);
2667 :
2668 : /*
2669 : * Done if it's not possible to find a longer sequence in the rest of
2670 : * the list. In typical cases this will happen on the first
2671 : * iteration, which is why we create the bitmaps on the fly instead of
2672 : * doing a second pass over the list.
2673 : */
2674 20 : if (bm_size >= (numitems - start_pos - 1))
2675 8 : break;
2676 : }
2677 :
2678 : /* OK, copy the data into CacheMemoryContext */
2679 8 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2680 : enumdata = (TypeCacheEnumData *)
2681 8 : palloc(offsetof(TypeCacheEnumData, enum_values) +
2682 8 : numitems * sizeof(EnumItem));
2683 8 : enumdata->bitmap_base = bitmap_base;
2684 8 : enumdata->sorted_values = bms_copy(bitmap);
2685 8 : enumdata->num_values = numitems;
2686 8 : memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
2687 8 : MemoryContextSwitchTo(oldcxt);
2688 :
2689 8 : pfree(items);
2690 8 : bms_free(bitmap);
2691 :
2692 : /* And link the finished cache struct into the typcache */
2693 8 : if (tcache->enumData != NULL)
2694 0 : pfree(tcache->enumData);
2695 8 : tcache->enumData = enumdata;
2696 8 : }
2697 :
2698 : /*
2699 : * Locate the EnumItem with the given OID, if present
2700 : */
2701 : static EnumItem *
2702 300148 : find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
2703 : {
2704 : EnumItem srch;
2705 :
2706 : /* On some versions of Solaris, bsearch of zero items dumps core */
2707 300148 : if (enumdata->num_values <= 0)
2708 0 : return NULL;
2709 :
2710 300148 : srch.enum_oid = arg;
2711 300148 : return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2712 : sizeof(EnumItem), enum_oid_cmp);
2713 : }
2714 :
2715 : /*
2716 : * qsort comparison function for OID-ordered EnumItems
2717 : */
2718 : static int
2719 600518 : enum_oid_cmp(const void *left, const void *right)
2720 : {
2721 600518 : const EnumItem *l = (const EnumItem *) left;
2722 600518 : const EnumItem *r = (const EnumItem *) right;
2723 :
2724 600518 : if (l->enum_oid < r->enum_oid)
2725 150190 : return -1;
2726 450328 : else if (l->enum_oid > r->enum_oid)
2727 150180 : return 1;
2728 : else
2729 300148 : return 0;
2730 : }
2731 :
2732 : /*
2733 : * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
2734 : * to the given value and return a dsa_pointer.
2735 : */
2736 : static dsa_pointer
2737 60 : share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
2738 : {
2739 : dsa_pointer shared_dp;
2740 : TupleDesc shared;
2741 :
2742 60 : shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2743 60 : shared = (TupleDesc) dsa_get_address(area, shared_dp);
2744 60 : TupleDescCopy(shared, tupdesc);
2745 60 : shared->tdtypmod = typmod;
2746 :
2747 60 : return shared_dp;
2748 : }
2749 :
2750 : /*
2751 : * If we are attached to a SharedRecordTypmodRegistry, use it to find or
2752 : * create a shared TupleDesc that matches 'tupdesc'. Otherwise return NULL.
2753 : * Tuple descriptors returned by this function are not reference counted, and
2754 : * will exist at least as long as the current backend remained attached to the
2755 : * current session.
2756 : */
2757 : static TupleDesc
2758 11686 : find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
2759 : {
2760 : TupleDesc result;
2761 : SharedRecordTableKey key;
2762 : SharedRecordTableEntry *record_table_entry;
2763 : SharedTypmodTableEntry *typmod_table_entry;
2764 : dsa_pointer shared_dp;
2765 : bool found;
2766 : uint32 typmod;
2767 :
2768 : /* If not even attached, nothing to do. */
2769 11686 : if (CurrentSession->shared_typmod_registry == NULL)
2770 11632 : return NULL;
2771 :
2772 : /* Try to find a matching tuple descriptor in the record table. */
2773 54 : key.shared = false;
2774 54 : key.u.local_tupdesc = tupdesc;
2775 : record_table_entry = (SharedRecordTableEntry *)
2776 54 : dshash_find(CurrentSession->shared_record_table, &key, false);
2777 54 : if (record_table_entry)
2778 : {
2779 : Assert(record_table_entry->key.shared);
2780 6 : dshash_release_lock(CurrentSession->shared_record_table,
2781 : record_table_entry);
2782 : result = (TupleDesc)
2783 6 : dsa_get_address(CurrentSession->area,
2784 : record_table_entry->key.u.shared_tupdesc);
2785 : Assert(result->tdrefcount == -1);
2786 :
2787 6 : return result;
2788 : }
2789 :
2790 : /* Allocate a new typmod number. This will be wasted if we error out. */
2791 48 : typmod = (int)
2792 48 : pg_atomic_fetch_add_u32(&CurrentSession->shared_typmod_registry->next_typmod,
2793 : 1);
2794 :
2795 : /* Copy the TupleDesc into shared memory. */
2796 48 : shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
2797 :
2798 : /*
2799 : * Create an entry in the typmod table so that others will understand this
2800 : * typmod number.
2801 : */
2802 48 : PG_TRY();
2803 : {
2804 : typmod_table_entry = (SharedTypmodTableEntry *)
2805 48 : dshash_find_or_insert(CurrentSession->shared_typmod_table,
2806 : &typmod, &found);
2807 48 : if (found)
2808 0 : elog(ERROR, "cannot create duplicate shared record typmod");
2809 : }
2810 0 : PG_CATCH();
2811 : {
2812 0 : dsa_free(CurrentSession->area, shared_dp);
2813 0 : PG_RE_THROW();
2814 : }
2815 48 : PG_END_TRY();
2816 48 : typmod_table_entry->typmod = typmod;
2817 48 : typmod_table_entry->shared_tupdesc = shared_dp;
2818 48 : dshash_release_lock(CurrentSession->shared_typmod_table,
2819 : typmod_table_entry);
2820 :
2821 : /*
2822 : * Finally create an entry in the record table so others with matching
2823 : * tuple descriptors can reuse the typmod.
2824 : */
2825 : record_table_entry = (SharedRecordTableEntry *)
2826 48 : dshash_find_or_insert(CurrentSession->shared_record_table, &key,
2827 : &found);
2828 48 : if (found)
2829 : {
2830 : /*
2831 : * Someone concurrently inserted a matching tuple descriptor since the
2832 : * first time we checked. Use that one instead.
2833 : */
2834 0 : dshash_release_lock(CurrentSession->shared_record_table,
2835 : record_table_entry);
2836 :
2837 : /* Might as well free up the space used by the one we created. */
2838 0 : found = dshash_delete_key(CurrentSession->shared_typmod_table,
2839 : &typmod);
2840 : Assert(found);
2841 0 : dsa_free(CurrentSession->area, shared_dp);
2842 :
2843 : /* Return the one we found. */
2844 : Assert(record_table_entry->key.shared);
2845 : result = (TupleDesc)
2846 0 : dsa_get_address(CurrentSession->area,
2847 : record_table_entry->key.u.shared_tupdesc);
2848 : Assert(result->tdrefcount == -1);
2849 :
2850 0 : return result;
2851 : }
2852 :
2853 : /* Store it and return it. */
2854 48 : record_table_entry->key.shared = true;
2855 48 : record_table_entry->key.u.shared_tupdesc = shared_dp;
2856 48 : dshash_release_lock(CurrentSession->shared_record_table,
2857 : record_table_entry);
2858 : result = (TupleDesc)
2859 48 : dsa_get_address(CurrentSession->area, shared_dp);
2860 : Assert(result->tdrefcount == -1);
2861 :
2862 48 : return result;
2863 : }
2864 :
2865 : /*
2866 : * On-DSM-detach hook to forget about the current shared record typmod
2867 : * infrastructure. This is currently used by both leader and workers.
2868 : */
2869 : static void
2870 2698 : shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
2871 : {
2872 : /* Be cautious here: maybe we didn't finish initializing. */
2873 2698 : if (CurrentSession->shared_record_table != NULL)
2874 : {
2875 2698 : dshash_detach(CurrentSession->shared_record_table);
2876 2698 : CurrentSession->shared_record_table = NULL;
2877 : }
2878 2698 : if (CurrentSession->shared_typmod_table != NULL)
2879 : {
2880 2698 : dshash_detach(CurrentSession->shared_typmod_table);
2881 2698 : CurrentSession->shared_typmod_table = NULL;
2882 : }
2883 2698 : CurrentSession->shared_typmod_registry = NULL;
2884 2698 : }
|