Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * typcache.c
4 : * POSTGRES type cache code
5 : *
6 : * The type cache exists to speed lookup of certain information about data
7 : * types that is not directly available from a type's pg_type row. For
8 : * example, we use a type's default btree opclass, or the default hash
9 : * opclass if no btree opclass exists, to determine which operators should
10 : * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
11 : *
12 : * Several seemingly-odd choices have been made to support use of the type
13 : * cache by generic array and record handling routines, such as array_eq(),
14 : * record_cmp(), and hash_array(). Because those routines are used as index
15 : * support operations, they cannot leak memory. To allow them to execute
16 : * efficiently, all information that they would like to re-use across calls
17 : * is kept in the type cache.
18 : *
19 : * Once created, a type cache entry lives as long as the backend does, so
20 : * there is no need for a call to release a cache entry. If the type is
21 : * dropped, the cache entry simply becomes wasted storage. This is not
22 : * expected to happen often, and assuming that typcache entries are good
23 : * permanently allows caching pointers to them in long-lived places.
24 : *
25 : * We have some provisions for updating cache entries if the stored data
26 : * becomes obsolete. Core data extracted from the pg_type row is updated
27 : * when we detect updates to pg_type. Information dependent on opclasses is
28 : * cleared if we detect updates to pg_opclass. We also support clearing the
29 : * tuple descriptor and operator/function parts of a rowtype's cache entry,
30 : * since those may need to change as a consequence of ALTER TABLE. Domain
31 : * constraint changes are also tracked properly.
32 : *
33 : *
34 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
35 : * Portions Copyright (c) 1994, Regents of the University of California
36 : *
37 : * IDENTIFICATION
38 : * src/backend/utils/cache/typcache.c
39 : *
40 : *-------------------------------------------------------------------------
41 : */
42 : #include "postgres.h"
43 :
44 : #include <limits.h>
45 :
46 : #include "access/hash.h"
47 : #include "access/htup_details.h"
48 : #include "access/nbtree.h"
49 : #include "access/parallel.h"
50 : #include "access/relation.h"
51 : #include "access/session.h"
52 : #include "access/table.h"
53 : #include "catalog/pg_am.h"
54 : #include "catalog/pg_constraint.h"
55 : #include "catalog/pg_enum.h"
56 : #include "catalog/pg_operator.h"
57 : #include "catalog/pg_range.h"
58 : #include "catalog/pg_type.h"
59 : #include "commands/defrem.h"
60 : #include "common/int.h"
61 : #include "executor/executor.h"
62 : #include "lib/dshash.h"
63 : #include "optimizer/optimizer.h"
64 : #include "port/pg_bitutils.h"
65 : #include "storage/lwlock.h"
66 : #include "utils/builtins.h"
67 : #include "utils/catcache.h"
68 : #include "utils/fmgroids.h"
69 : #include "utils/injection_point.h"
70 : #include "utils/inval.h"
71 : #include "utils/lsyscache.h"
72 : #include "utils/memutils.h"
73 : #include "utils/rel.h"
74 : #include "utils/syscache.h"
75 : #include "utils/typcache.h"
76 :
77 :
78 : /* The main type cache hashtable searched by lookup_type_cache */
79 : static HTAB *TypeCacheHash = NULL;
80 :
/*
 * The mapping of relation's OID to the corresponding composite type OID.
 * We're keeping the map entry when the corresponding typentry has something
 * to clear, i.e. it has either TCFLAGS_HAVE_PG_TYPE_DATA, or
 * TCFLAGS_OPERATOR_FLAGS, or tupdesc.
 */
static HTAB *RelIdToTypeIdCacheHash = NULL;

typedef struct RelIdToTypeIdCacheEntry
{
	Oid			relid;			/* OID of the relation (hash key) */
	Oid			composite_typid;	/* OID of the relation's composite type */
} RelIdToTypeIdCacheEntry;
94 :
95 : /* List of type cache entries for domain types */
96 : static TypeCacheEntry *firstDomainTypeEntry = NULL;
97 :
/*
 * Private flag bits in the TypeCacheEntry.flags field.
 *
 * Convention: TCFLAGS_CHECKED_xxx means we have attempted the corresponding
 * lookup (whether or not it succeeded), so it need not be repeated;
 * TCFLAGS_HAVE_xxx records a positive result of such a check.
 */
#define TCFLAGS_HAVE_PG_TYPE_DATA			0x000001
#define TCFLAGS_CHECKED_BTREE_OPCLASS		0x000002
#define TCFLAGS_CHECKED_HASH_OPCLASS		0x000004
#define TCFLAGS_CHECKED_EQ_OPR				0x000008
#define TCFLAGS_CHECKED_LT_OPR				0x000010
#define TCFLAGS_CHECKED_GT_OPR				0x000020
#define TCFLAGS_CHECKED_CMP_PROC			0x000040
#define TCFLAGS_CHECKED_HASH_PROC			0x000080
#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC	0x000100
#define TCFLAGS_CHECKED_ELEM_PROPERTIES		0x000200
#define TCFLAGS_HAVE_ELEM_EQUALITY			0x000400
#define TCFLAGS_HAVE_ELEM_COMPARE			0x000800
#define TCFLAGS_HAVE_ELEM_HASHING			0x001000
#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING	0x002000
#define TCFLAGS_CHECKED_FIELD_PROPERTIES	0x004000
#define TCFLAGS_HAVE_FIELD_EQUALITY			0x008000
#define TCFLAGS_HAVE_FIELD_COMPARE			0x010000
#define TCFLAGS_HAVE_FIELD_HASHING			0x020000
#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING 0x040000
#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS	0x080000
#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE	0x100000

/* The flags associated with equality/comparison/hashing are all but these: */
#define TCFLAGS_OPERATOR_FLAGS \
	(~(TCFLAGS_HAVE_PG_TYPE_DATA | \
	   TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
	   TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))
126 :
/*
 * Data stored about a domain type's constraints.  Note that we do not create
 * this struct for the common case of a constraint-less domain; we just set
 * domainData to NULL to indicate that.
 *
 * Within a DomainConstraintCache, we store expression plan trees, but the
 * check_exprstate fields of the DomainConstraintState nodes are just NULL.
 * When needed, expression evaluation nodes are built by flat-copying the
 * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
 * Such a node tree is not part of the DomainConstraintCache, but is
 * considered to belong to a DomainConstraintRef.
 */
struct DomainConstraintCache
{
	List	   *constraints;	/* list of DomainConstraintState nodes */
	MemoryContext dccContext;	/* memory context holding all associated data */
	long		dccRefCount;	/* number of references to this struct;
								 * presumably freed when it reaches zero —
								 * see decr_dcc_refcount() */
};
145 :
/* Private information to support comparisons of enum values */
typedef struct
{
	Oid			enum_oid;		/* OID of one enum value */
	float4		sort_order;		/* its sort position */
} EnumItem;

typedef struct TypeCacheEnumData
{
	Oid			bitmap_base;	/* OID corresponding to bit 0 of bitmapset */
	Bitmapset  *sorted_values;	/* Set of OIDs known to be in order */
	int			num_values;		/* total number of values in enum */
	/* one entry per enum value; presumably ordered so find_enumitem() can
	 * search efficiently — confirm against load_enum_cache_data() */
	EnumItem	enum_values[FLEXIBLE_ARRAY_MEMBER];
} TypeCacheEnumData;
160 :
/*
 * We use a separate table for storing the definitions of non-anonymous
 * record types.  Once defined, a record type will be remembered for the
 * life of the backend.  Subsequent uses of the "same" record type (where
 * sameness means equalRowTypes) will refer to the existing table entry.
 *
 * Stored record types are remembered in a linear array of TupleDescs,
 * which can be indexed quickly with the assigned typmod.  There is also
 * a hash table to speed searches for matching TupleDescs.
 */

typedef struct RecordCacheEntry
{
	TupleDesc	tupdesc;		/* sole field; doubles as the hash key */
} RecordCacheEntry;
176 :
/*
 * To deal with non-anonymous record types that are exchanged by backends
 * involved in a parallel query, we also need a shared version of the above.
 */
struct SharedRecordTypmodRegistry
{
	/* A hash table for finding a matching TupleDesc. */
	dshash_table_handle record_table_handle;
	/* A hash table for finding a TupleDesc by typmod. */
	dshash_table_handle typmod_table_handle;
	/* A source of new record typmod numbers. */
	pg_atomic_uint32 next_typmod;
};
190 :
/*
 * When using shared tuple descriptors as hash table keys we need a way to be
 * able to search for an equal shared TupleDesc using a backend-local
 * TupleDesc.  So we use this type which can hold either, and hash and compare
 * functions that know how to handle both.
 */
typedef struct SharedRecordTableKey
{
	union
	{
		TupleDesc	local_tupdesc;	/* valid when !shared */
		dsa_pointer shared_tupdesc; /* valid when shared */
	}			u;
	bool		shared;			/* discriminator for the union above */
} SharedRecordTableKey;

/*
 * The shared version of RecordCacheEntry.  This lets us look up a typmod
 * using a TupleDesc which may be in local or shared memory.
 */
typedef struct SharedRecordTableEntry
{
	SharedRecordTableKey key;
} SharedRecordTableEntry;

/*
 * An entry in SharedRecordTypmodRegistry's typmod table.  This lets us look
 * up a TupleDesc in shared memory using a typmod.
 */
typedef struct SharedTypmodTableEntry
{
	uint32		typmod;			/* assigned record typmod (hash key) */
	dsa_pointer shared_tupdesc; /* DSA pointer to the shared TupleDesc */
} SharedTypmodTableEntry;
225 :
/*
 * Type OIDs whose typcache entries are currently being filled by (possibly
 * nested) lookup_type_cache() calls.  Each lookup registers its target OID
 * here before probing the caches, so that invalidation messages processed
 * mid-lookup can be noticed; see lookup_type_cache().
 */
static Oid *in_progress_list;	/* allocated in CacheMemoryContext */
static int	in_progress_list_len;	/* number of slots currently in use */
static int	in_progress_list_maxlen;	/* allocated length of the array */
229 :
230 : /*
231 : * A comparator function for SharedRecordTableKey.
232 : */
233 : static int
234 133 : shared_record_table_compare(const void *a, const void *b, size_t size,
235 : void *arg)
236 : {
237 133 : dsa_area *area = (dsa_area *) arg;
238 133 : const SharedRecordTableKey *k1 = a;
239 133 : const SharedRecordTableKey *k2 = b;
240 : TupleDesc t1;
241 : TupleDesc t2;
242 :
243 133 : if (k1->shared)
244 0 : t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
245 : else
246 133 : t1 = k1->u.local_tupdesc;
247 :
248 133 : if (k2->shared)
249 133 : t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
250 : else
251 0 : t2 = k2->u.local_tupdesc;
252 :
253 133 : return equalRowTypes(t1, t2) ? 0 : 1;
254 : }
255 :
256 : /*
257 : * A hash function for SharedRecordTableKey.
258 : */
259 : static uint32
260 318 : shared_record_table_hash(const void *a, size_t size, void *arg)
261 : {
262 318 : dsa_area *area = arg;
263 318 : const SharedRecordTableKey *k = a;
264 : TupleDesc t;
265 :
266 318 : if (k->shared)
267 0 : t = (TupleDesc) dsa_get_address(area, k->u.shared_tupdesc);
268 : else
269 318 : t = k->u.local_tupdesc;
270 :
271 318 : return hashRowType(t);
272 : }
273 :
/* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
static const dshash_parameters srtr_record_table_params = {
	sizeof(SharedRecordTableKey),	/* unused */
	sizeof(SharedRecordTableEntry),
	shared_record_table_compare,	/* row-type-aware compare, see above */
	shared_record_table_hash,	/* matching hash function */
	dshash_memcpy,
	LWTRANCHE_PER_SESSION_RECORD_TYPE
};

/* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
static const dshash_parameters srtr_typmod_table_params = {
	sizeof(uint32),				/* key is the typmod value itself */
	sizeof(SharedTypmodTableEntry),
	dshash_memcmp,				/* plain byte compare suffices for uint32 */
	dshash_memhash,
	dshash_memcpy,
	LWTRANCHE_PER_SESSION_RECORD_TYPMOD
};
293 :
/* hashtable for recognizing registered record types */
static HTAB *RecordCacheHash = NULL;

typedef struct RecordCacheArrayEntry
{
	uint64		id;				/* unique tupledesc identifier */
	TupleDesc	tupdesc;		/* descriptor for this registered row type */
} RecordCacheArrayEntry;

/* array of info about registered record types, indexed by assigned typmod */
static RecordCacheArrayEntry *RecordCacheArray = NULL;
static int32 RecordCacheArrayLen = 0;	/* allocated length of above array */
static int32 NextRecordTypmod = 0;	/* number of entries used */

/*
 * Process-wide counter for generating unique tupledesc identifiers.
 * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
 * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
 */
static uint64 tupledesc_id_counter = INVALID_TUPLEDESC_IDENTIFIER;
314 :
315 : static void load_typcache_tupdesc(TypeCacheEntry *typentry);
316 : static void load_rangetype_info(TypeCacheEntry *typentry);
317 : static void load_multirangetype_info(TypeCacheEntry *typentry);
318 : static void load_domaintype_info(TypeCacheEntry *typentry);
319 : static int dcs_cmp(const void *a, const void *b);
320 : static void decr_dcc_refcount(DomainConstraintCache *dcc);
321 : static void dccref_deletion_callback(void *arg);
322 : static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
323 : static bool array_element_has_equality(TypeCacheEntry *typentry);
324 : static bool array_element_has_compare(TypeCacheEntry *typentry);
325 : static bool array_element_has_hashing(TypeCacheEntry *typentry);
326 : static bool array_element_has_extended_hashing(TypeCacheEntry *typentry);
327 : static void cache_array_element_properties(TypeCacheEntry *typentry);
328 : static bool record_fields_have_equality(TypeCacheEntry *typentry);
329 : static bool record_fields_have_compare(TypeCacheEntry *typentry);
330 : static bool record_fields_have_hashing(TypeCacheEntry *typentry);
331 : static bool record_fields_have_extended_hashing(TypeCacheEntry *typentry);
332 : static void cache_record_field_properties(TypeCacheEntry *typentry);
333 : static bool range_element_has_hashing(TypeCacheEntry *typentry);
334 : static bool range_element_has_extended_hashing(TypeCacheEntry *typentry);
335 : static void cache_range_element_properties(TypeCacheEntry *typentry);
336 : static bool multirange_element_has_hashing(TypeCacheEntry *typentry);
337 : static bool multirange_element_has_extended_hashing(TypeCacheEntry *typentry);
338 : static void cache_multirange_element_properties(TypeCacheEntry *typentry);
339 : static void TypeCacheRelCallback(Datum arg, Oid relid);
340 : static void TypeCacheTypCallback(Datum arg, SysCacheIdentifier cacheid,
341 : uint32 hashvalue);
342 : static void TypeCacheOpcCallback(Datum arg, SysCacheIdentifier cacheid,
343 : uint32 hashvalue);
344 : static void TypeCacheConstrCallback(Datum arg, SysCacheIdentifier cacheid,
345 : uint32 hashvalue);
346 : static void load_enum_cache_data(TypeCacheEntry *tcache);
347 : static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
348 : static int enum_oid_cmp(const void *left, const void *right);
349 : static void shared_record_typmod_registry_detach(dsm_segment *segment,
350 : Datum datum);
351 : static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc);
352 : static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
353 : uint32 typmod);
354 : static void insert_rel_type_cache_if_needed(TypeCacheEntry *typentry);
355 : static void delete_rel_type_cache_if_needed(TypeCacheEntry *typentry);
356 :
357 :
358 : /*
359 : * Hash function compatible with one-arg system cache hash function.
360 : */
361 : static uint32
362 579497 : type_cache_syshash(const void *key, Size keysize)
363 : {
364 : Assert(keysize == sizeof(Oid));
365 579497 : return GetSysCacheHashValue1(TYPEOID, ObjectIdGetDatum(*(const Oid *) key));
366 : }
367 :
368 : /*
369 : * lookup_type_cache
370 : *
371 : * Fetch the type cache entry for the specified datatype, and make sure that
372 : * all the fields requested by bits in 'flags' are valid.
373 : *
374 : * The result is never NULL --- we will ereport() if the passed type OID is
375 : * invalid. Note however that we may fail to find one or more of the
376 : * values requested by 'flags'; the caller needs to check whether the fields
377 : * are InvalidOid or not.
378 : *
379 : * Note that while filling TypeCacheEntry we might process concurrent
380 : * invalidation messages, causing our not-yet-filled TypeCacheEntry to be
381 : * invalidated. In this case, we typically only clear flags while values are
382 : * still available for the caller. It's expected that the caller holds
383 : * enough locks on type-depending objects that the values are still relevant.
384 : * It's also important that the tupdesc is filled after all other
385 : * TypeCacheEntry items for TYPTYPE_COMPOSITE. So, tupdesc can't get
386 : * invalidated during the lookup_type_cache() call.
387 : */
388 : TypeCacheEntry *
389 530507 : lookup_type_cache(Oid type_id, int flags)
390 : {
391 : TypeCacheEntry *typentry;
392 : bool found;
393 : int in_progress_offset;
394 :
395 530507 : if (TypeCacheHash == NULL)
396 : {
397 : /* First time through: initialize the hash table */
398 : HASHCTL ctl;
399 : int allocsize;
400 :
401 4479 : ctl.keysize = sizeof(Oid);
402 4479 : ctl.entrysize = sizeof(TypeCacheEntry);
403 :
404 : /*
405 : * TypeCacheEntry takes hash value from the system cache. For
406 : * TypeCacheHash we use the same hash in order to speedup search by
407 : * hash value. This is used by hash_seq_init_with_hash_value().
408 : */
409 4479 : ctl.hash = type_cache_syshash;
410 :
411 4479 : TypeCacheHash = hash_create("Type information cache", 64,
412 : &ctl, HASH_ELEM | HASH_FUNCTION);
413 :
414 : Assert(RelIdToTypeIdCacheHash == NULL);
415 :
416 4479 : ctl.keysize = sizeof(Oid);
417 4479 : ctl.entrysize = sizeof(RelIdToTypeIdCacheEntry);
418 4479 : RelIdToTypeIdCacheHash = hash_create("Map from relid to OID of cached composite type", 64,
419 : &ctl, HASH_ELEM | HASH_BLOBS);
420 :
421 : /* Also set up callbacks for SI invalidations */
422 4479 : CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
423 4479 : CacheRegisterSyscacheCallback(TYPEOID, TypeCacheTypCallback, (Datum) 0);
424 4479 : CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
425 4479 : CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);
426 :
427 : /* Also make sure CacheMemoryContext exists */
428 4479 : if (!CacheMemoryContext)
429 0 : CreateCacheMemoryContext();
430 :
431 : /*
432 : * reserve enough in_progress_list slots for many cases
433 : */
434 4479 : allocsize = 4;
435 4479 : in_progress_list =
436 4479 : MemoryContextAlloc(CacheMemoryContext,
437 : allocsize * sizeof(*in_progress_list));
438 4479 : in_progress_list_maxlen = allocsize;
439 : }
440 :
441 : Assert(TypeCacheHash != NULL && RelIdToTypeIdCacheHash != NULL);
442 :
443 : /* Register to catch invalidation messages */
444 530507 : if (in_progress_list_len >= in_progress_list_maxlen)
445 : {
446 : int allocsize;
447 :
448 0 : allocsize = in_progress_list_maxlen * 2;
449 0 : in_progress_list = repalloc(in_progress_list,
450 : allocsize * sizeof(*in_progress_list));
451 0 : in_progress_list_maxlen = allocsize;
452 : }
453 530507 : in_progress_offset = in_progress_list_len++;
454 530507 : in_progress_list[in_progress_offset] = type_id;
455 :
456 : /* Try to look up an existing entry */
457 530507 : typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
458 : &type_id,
459 : HASH_FIND, NULL);
460 530507 : if (typentry == NULL)
461 : {
462 : /*
463 : * If we didn't find one, we want to make one. But first look up the
464 : * pg_type row, just to make sure we don't make a cache entry for an
465 : * invalid type OID. If the type OID is not valid, present a
466 : * user-facing error, since some code paths such as domain_in() allow
467 : * this function to be reached with a user-supplied OID.
468 : */
469 : HeapTuple tp;
470 : Form_pg_type typtup;
471 :
472 20802 : tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
473 20802 : if (!HeapTupleIsValid(tp))
474 0 : ereport(ERROR,
475 : (errcode(ERRCODE_UNDEFINED_OBJECT),
476 : errmsg("type with OID %u does not exist", type_id)));
477 20802 : typtup = (Form_pg_type) GETSTRUCT(tp);
478 20802 : if (!typtup->typisdefined)
479 0 : ereport(ERROR,
480 : (errcode(ERRCODE_UNDEFINED_OBJECT),
481 : errmsg("type \"%s\" is only a shell",
482 : NameStr(typtup->typname))));
483 :
484 : /* Now make the typcache entry */
485 20802 : typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
486 : &type_id,
487 : HASH_ENTER, &found);
488 : Assert(!found); /* it wasn't there a moment ago */
489 :
490 1310526 : MemSet(typentry, 0, sizeof(TypeCacheEntry));
491 :
492 : /* These fields can never change, by definition */
493 20802 : typentry->type_id = type_id;
494 20802 : typentry->type_id_hash = get_hash_value(TypeCacheHash, &type_id);
495 :
496 : /* Keep this part in sync with the code below */
497 20802 : typentry->typlen = typtup->typlen;
498 20802 : typentry->typbyval = typtup->typbyval;
499 20802 : typentry->typalign = typtup->typalign;
500 20802 : typentry->typstorage = typtup->typstorage;
501 20802 : typentry->typtype = typtup->typtype;
502 20802 : typentry->typrelid = typtup->typrelid;
503 20802 : typentry->typsubscript = typtup->typsubscript;
504 20802 : typentry->typelem = typtup->typelem;
505 20802 : typentry->typarray = typtup->typarray;
506 20802 : typentry->typcollation = typtup->typcollation;
507 20802 : typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
508 :
509 : /* If it's a domain, immediately thread it into the domain cache list */
510 20802 : if (typentry->typtype == TYPTYPE_DOMAIN)
511 : {
512 1032 : typentry->nextDomain = firstDomainTypeEntry;
513 1032 : firstDomainTypeEntry = typentry;
514 : }
515 :
516 20802 : ReleaseSysCache(tp);
517 : }
518 509705 : else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
519 : {
520 : /*
521 : * We have an entry, but its pg_type row got changed, so reload the
522 : * data obtained directly from pg_type.
523 : */
524 : HeapTuple tp;
525 : Form_pg_type typtup;
526 :
527 359 : tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
528 359 : if (!HeapTupleIsValid(tp))
529 0 : ereport(ERROR,
530 : (errcode(ERRCODE_UNDEFINED_OBJECT),
531 : errmsg("type with OID %u does not exist", type_id)));
532 359 : typtup = (Form_pg_type) GETSTRUCT(tp);
533 359 : if (!typtup->typisdefined)
534 0 : ereport(ERROR,
535 : (errcode(ERRCODE_UNDEFINED_OBJECT),
536 : errmsg("type \"%s\" is only a shell",
537 : NameStr(typtup->typname))));
538 :
539 : /*
540 : * Keep this part in sync with the code above. Many of these fields
541 : * shouldn't ever change, particularly typtype, but copy 'em anyway.
542 : */
543 359 : typentry->typlen = typtup->typlen;
544 359 : typentry->typbyval = typtup->typbyval;
545 359 : typentry->typalign = typtup->typalign;
546 359 : typentry->typstorage = typtup->typstorage;
547 359 : typentry->typtype = typtup->typtype;
548 359 : typentry->typrelid = typtup->typrelid;
549 359 : typentry->typsubscript = typtup->typsubscript;
550 359 : typentry->typelem = typtup->typelem;
551 359 : typentry->typarray = typtup->typarray;
552 359 : typentry->typcollation = typtup->typcollation;
553 359 : typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
554 :
555 359 : ReleaseSysCache(tp);
556 : }
557 :
558 : /*
559 : * Look up opclasses if we haven't already and any dependent info is
560 : * requested.
561 : */
562 530507 : if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR | TYPECACHE_GT_OPR |
563 : TYPECACHE_CMP_PROC |
564 : TYPECACHE_EQ_OPR_FINFO | TYPECACHE_CMP_PROC_FINFO |
565 354517 : TYPECACHE_BTREE_OPFAMILY)) &&
566 354517 : !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
567 : {
568 : Oid opclass;
569 :
570 18139 : opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
571 18139 : if (OidIsValid(opclass))
572 : {
573 17526 : typentry->btree_opf = get_opclass_family(opclass);
574 17526 : typentry->btree_opintype = get_opclass_input_type(opclass);
575 : }
576 : else
577 : {
578 613 : typentry->btree_opf = typentry->btree_opintype = InvalidOid;
579 : }
580 :
581 : /*
582 : * Reset information derived from btree opclass. Note in particular
583 : * that we'll redetermine the eq_opr even if we previously found one;
584 : * this matters in case a btree opclass has been added to a type that
585 : * previously had only a hash opclass.
586 : */
587 18139 : typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
588 : TCFLAGS_CHECKED_LT_OPR |
589 : TCFLAGS_CHECKED_GT_OPR |
590 : TCFLAGS_CHECKED_CMP_PROC);
591 18139 : typentry->flags |= TCFLAGS_CHECKED_BTREE_OPCLASS;
592 : }
593 :
594 : /*
595 : * If we need to look up equality operator, and there's no btree opclass,
596 : * force lookup of hash opclass.
597 : */
598 530507 : if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
599 334826 : !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
600 17957 : typentry->btree_opf == InvalidOid)
601 605 : flags |= TYPECACHE_HASH_OPFAMILY;
602 :
603 530507 : if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO |
604 : TYPECACHE_HASH_EXTENDED_PROC |
605 : TYPECACHE_HASH_EXTENDED_PROC_FINFO |
606 243353 : TYPECACHE_HASH_OPFAMILY)) &&
607 243353 : !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
608 : {
609 : Oid opclass;
610 :
611 13827 : opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
612 13827 : if (OidIsValid(opclass))
613 : {
614 13666 : typentry->hash_opf = get_opclass_family(opclass);
615 13666 : typentry->hash_opintype = get_opclass_input_type(opclass);
616 : }
617 : else
618 : {
619 161 : typentry->hash_opf = typentry->hash_opintype = InvalidOid;
620 : }
621 :
622 : /*
623 : * Reset information derived from hash opclass. We do *not* reset the
624 : * eq_opr; if we already found one from the btree opclass, that
625 : * decision is still good.
626 : */
627 13827 : typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
628 : TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
629 13827 : typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
630 : }
631 :
632 : /*
633 : * Look for requested operators and functions, if we haven't already.
634 : */
635 530507 : if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
636 334826 : !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
637 : {
638 17957 : Oid eq_opr = InvalidOid;
639 :
640 17957 : if (typentry->btree_opf != InvalidOid)
641 17352 : eq_opr = get_opfamily_member(typentry->btree_opf,
642 : typentry->btree_opintype,
643 : typentry->btree_opintype,
644 : BTEqualStrategyNumber);
645 17957 : if (eq_opr == InvalidOid &&
646 605 : typentry->hash_opf != InvalidOid)
647 484 : eq_opr = get_opfamily_member(typentry->hash_opf,
648 : typentry->hash_opintype,
649 : typentry->hash_opintype,
650 : HTEqualStrategyNumber);
651 :
652 : /*
653 : * If the proposed equality operator is array_eq or record_eq, check
654 : * to see if the element type or column types support equality. If
655 : * not, array_eq or record_eq would fail at runtime, so we don't want
656 : * to report that the type has equality. (We can omit similar
657 : * checking for ranges and multiranges because ranges can't be created
658 : * in the first place unless their subtypes support equality.)
659 : */
660 17957 : if (eq_opr == ARRAY_EQ_OP &&
661 1786 : !array_element_has_equality(typentry))
662 277 : eq_opr = InvalidOid;
663 17680 : else if (eq_opr == RECORD_EQ_OP &&
664 299 : !record_fields_have_equality(typentry))
665 145 : eq_opr = InvalidOid;
666 :
667 : /* Force update of eq_opr_finfo only if we're changing state */
668 17957 : if (typentry->eq_opr != eq_opr)
669 16376 : typentry->eq_opr_finfo.fn_oid = InvalidOid;
670 :
671 17957 : typentry->eq_opr = eq_opr;
672 :
673 : /*
674 : * Reset info about hash functions whenever we pick up new info about
675 : * equality operator. This is so we can ensure that the hash
676 : * functions match the operator.
677 : */
678 17957 : typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
679 : TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
680 17957 : typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
681 : }
682 530507 : if ((flags & TYPECACHE_LT_OPR) &&
683 186791 : !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
684 : {
685 11311 : Oid lt_opr = InvalidOid;
686 :
687 11311 : if (typentry->btree_opf != InvalidOid)
688 11084 : lt_opr = get_opfamily_member(typentry->btree_opf,
689 : typentry->btree_opintype,
690 : typentry->btree_opintype,
691 : BTLessStrategyNumber);
692 :
693 : /*
694 : * As above, make sure array_cmp or record_cmp will succeed; but again
695 : * we need no special check for ranges or multiranges.
696 : */
697 11311 : if (lt_opr == ARRAY_LT_OP &&
698 1282 : !array_element_has_compare(typentry))
699 298 : lt_opr = InvalidOid;
700 11013 : else if (lt_opr == RECORD_LT_OP &&
701 84 : !record_fields_have_compare(typentry))
702 8 : lt_opr = InvalidOid;
703 :
704 11311 : typentry->lt_opr = lt_opr;
705 11311 : typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
706 : }
707 530507 : if ((flags & TYPECACHE_GT_OPR) &&
708 180503 : !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
709 : {
710 11236 : Oid gt_opr = InvalidOid;
711 :
712 11236 : if (typentry->btree_opf != InvalidOid)
713 11017 : gt_opr = get_opfamily_member(typentry->btree_opf,
714 : typentry->btree_opintype,
715 : typentry->btree_opintype,
716 : BTGreaterStrategyNumber);
717 :
718 : /*
719 : * As above, make sure array_cmp or record_cmp will succeed; but again
720 : * we need no special check for ranges or multiranges.
721 : */
722 11236 : if (gt_opr == ARRAY_GT_OP &&
723 1279 : !array_element_has_compare(typentry))
724 298 : gt_opr = InvalidOid;
725 10938 : else if (gt_opr == RECORD_GT_OP &&
726 84 : !record_fields_have_compare(typentry))
727 8 : gt_opr = InvalidOid;
728 :
729 11236 : typentry->gt_opr = gt_opr;
730 11236 : typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
731 : }
732 530507 : if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
733 17231 : !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
734 : {
735 2771 : Oid cmp_proc = InvalidOid;
736 :
737 2771 : if (typentry->btree_opf != InvalidOid)
738 2654 : cmp_proc = get_opfamily_proc(typentry->btree_opf,
739 : typentry->btree_opintype,
740 : typentry->btree_opintype,
741 : BTORDER_PROC);
742 :
743 : /*
744 : * As above, make sure array_cmp or record_cmp will succeed; but again
745 : * we need no special check for ranges or multiranges.
746 : */
747 2771 : if (cmp_proc == F_BTARRAYCMP &&
748 578 : !array_element_has_compare(typentry))
749 133 : cmp_proc = InvalidOid;
750 2638 : else if (cmp_proc == F_BTRECORDCMP &&
751 180 : !record_fields_have_compare(typentry))
752 133 : cmp_proc = InvalidOid;
753 :
754 : /* Force update of cmp_proc_finfo only if we're changing state */
755 2771 : if (typentry->cmp_proc != cmp_proc)
756 2351 : typentry->cmp_proc_finfo.fn_oid = InvalidOid;
757 :
758 2771 : typentry->cmp_proc = cmp_proc;
759 2771 : typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
760 : }
761 530507 : if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) &&
762 242843 : !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
763 : {
764 13689 : Oid hash_proc = InvalidOid;
765 :
766 : /*
767 : * We insist that the eq_opr, if one has been determined, match the
768 : * hash opclass; else report there is no hash function.
769 : */
770 13689 : if (typentry->hash_opf != InvalidOid &&
771 26490 : (!OidIsValid(typentry->eq_opr) ||
772 12913 : typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
773 : typentry->hash_opintype,
774 : typentry->hash_opintype,
775 : HTEqualStrategyNumber)))
776 13577 : hash_proc = get_opfamily_proc(typentry->hash_opf,
777 : typentry->hash_opintype,
778 : typentry->hash_opintype,
779 : HASHSTANDARD_PROC);
780 :
781 : /*
782 : * As above, make sure hash_array, hash_record, or hash_range will
783 : * succeed.
784 : */
785 13689 : if (hash_proc == F_HASH_ARRAY &&
786 1286 : !array_element_has_hashing(typentry))
787 182 : hash_proc = InvalidOid;
788 13507 : else if (hash_proc == F_HASH_RECORD &&
789 285 : !record_fields_have_hashing(typentry))
790 170 : hash_proc = InvalidOid;
791 13337 : else if (hash_proc == F_HASH_RANGE &&
792 65 : !range_element_has_hashing(typentry))
793 4 : hash_proc = InvalidOid;
794 :
795 : /*
796 : * Likewise for hash_multirange.
797 : */
798 13689 : if (hash_proc == F_HASH_MULTIRANGE &&
799 12 : !multirange_element_has_hashing(typentry))
800 4 : hash_proc = InvalidOid;
801 :
802 : /* Force update of hash_proc_finfo only if we're changing state */
803 13689 : if (typentry->hash_proc != hash_proc)
804 12034 : typentry->hash_proc_finfo.fn_oid = InvalidOid;
805 :
806 13689 : typentry->hash_proc = hash_proc;
807 13689 : typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
808 : }
809 530507 : if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
810 5795 : TYPECACHE_HASH_EXTENDED_PROC_FINFO)) &&
811 5795 : !(typentry->flags & TCFLAGS_CHECKED_HASH_EXTENDED_PROC))
812 : {
813 2322 : Oid hash_extended_proc = InvalidOid;
814 :
815 : /*
816 : * We insist that the eq_opr, if one has been determined, match the
817 : * hash opclass; else report there is no hash function.
818 : */
819 2322 : if (typentry->hash_opf != InvalidOid &&
820 4258 : (!OidIsValid(typentry->eq_opr) ||
821 1961 : typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
822 : typentry->hash_opintype,
823 : typentry->hash_opintype,
824 : HTEqualStrategyNumber)))
825 2297 : hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
826 : typentry->hash_opintype,
827 : typentry->hash_opintype,
828 : HASHEXTENDED_PROC);
829 :
830 : /*
831 : * As above, make sure hash_array_extended, hash_record_extended, or
832 : * hash_range_extended will succeed.
833 : */
834 2322 : if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
835 272 : !array_element_has_extended_hashing(typentry))
836 133 : hash_extended_proc = InvalidOid;
837 2189 : else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
838 141 : !record_fields_have_extended_hashing(typentry))
839 137 : hash_extended_proc = InvalidOid;
840 2052 : else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
841 0 : !range_element_has_extended_hashing(typentry))
842 0 : hash_extended_proc = InvalidOid;
843 :
844 : /*
845 : * Likewise for hash_multirange_extended.
846 : */
847 2322 : if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
848 0 : !multirange_element_has_extended_hashing(typentry))
849 0 : hash_extended_proc = InvalidOid;
850 :
851 : /* Force update of proc finfo only if we're changing state */
852 2322 : if (typentry->hash_extended_proc != hash_extended_proc)
853 2014 : typentry->hash_extended_proc_finfo.fn_oid = InvalidOid;
854 :
855 2322 : typentry->hash_extended_proc = hash_extended_proc;
856 2322 : typentry->flags |= TCFLAGS_CHECKED_HASH_EXTENDED_PROC;
857 : }
858 :
859 : /*
860 : * Set up fmgr lookup info as requested
861 : *
862 : * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
863 : * which is not quite right (they're really in the hash table's private
864 : * memory context) but this will do for our purposes.
865 : *
866 : * Note: the code above avoids invalidating the finfo structs unless the
867 : * referenced operator/function OID actually changes. This is to prevent
868 : * unnecessary leakage of any subsidiary data attached to an finfo, since
869 : * that would cause session-lifespan memory leaks.
870 : */
871 530507 : if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
872 3531 : typentry->eq_opr_finfo.fn_oid == InvalidOid &&
873 1041 : typentry->eq_opr != InvalidOid)
874 : {
875 : Oid eq_opr_func;
876 :
877 1037 : eq_opr_func = get_opcode(typentry->eq_opr);
878 1037 : if (eq_opr_func != InvalidOid)
879 1037 : fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
880 : CacheMemoryContext);
881 : }
882 530507 : if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
883 9841 : typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
884 2328 : typentry->cmp_proc != InvalidOid)
885 : {
886 930 : fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
887 : CacheMemoryContext);
888 : }
889 530507 : if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
890 5221 : typentry->hash_proc_finfo.fn_oid == InvalidOid &&
891 917 : typentry->hash_proc != InvalidOid)
892 : {
893 805 : fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
894 : CacheMemoryContext);
895 : }
896 530507 : if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) &&
897 88 : typentry->hash_extended_proc_finfo.fn_oid == InvalidOid &&
898 24 : typentry->hash_extended_proc != InvalidOid)
899 : {
900 16 : fmgr_info_cxt(typentry->hash_extended_proc,
901 : &typentry->hash_extended_proc_finfo,
902 : CacheMemoryContext);
903 : }
904 :
905 : /*
906 : * If it's a composite type (row type), get tupdesc if requested
907 : */
908 530507 : if ((flags & TYPECACHE_TUPDESC) &&
909 60942 : typentry->tupDesc == NULL &&
910 2573 : typentry->typtype == TYPTYPE_COMPOSITE)
911 : {
912 2506 : load_typcache_tupdesc(typentry);
913 : }
914 :
915 : /*
916 : * If requested, get information about a range type
917 : *
918 : * This includes making sure that the basic info about the range element
919 : * type is up-to-date.
920 : */
921 530507 : if ((flags & TYPECACHE_RANGE_INFO) &&
922 21425 : typentry->typtype == TYPTYPE_RANGE)
923 : {
924 21425 : if (typentry->rngelemtype == NULL)
925 443 : load_rangetype_info(typentry);
926 20982 : else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
927 4 : (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
928 : }
929 :
930 : /*
931 : * If requested, get information about a multirange type
932 : */
933 530507 : if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
934 9476 : typentry->rngtype == NULL &&
935 131 : typentry->typtype == TYPTYPE_MULTIRANGE)
936 : {
937 131 : load_multirangetype_info(typentry);
938 : }
939 :
940 : /*
941 : * If requested, get information about a domain type
942 : */
943 530507 : if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
944 6013 : typentry->domainBaseType == InvalidOid &&
945 4145 : typentry->typtype == TYPTYPE_DOMAIN)
946 : {
947 296 : typentry->domainBaseTypmod = -1;
948 296 : typentry->domainBaseType =
949 296 : getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
950 : }
951 530507 : if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
952 26541 : (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
953 3502 : typentry->typtype == TYPTYPE_DOMAIN)
954 : {
955 1903 : load_domaintype_info(typentry);
956 : }
957 :
958 530507 : INJECTION_POINT("typecache-before-rel-type-cache-insert", NULL);
959 :
960 : Assert(in_progress_offset + 1 == in_progress_list_len);
961 530506 : in_progress_list_len--;
962 :
963 530506 : insert_rel_type_cache_if_needed(typentry);
964 :
965 530506 : return typentry;
966 : }
967 :
968 : /*
969 : * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
970 : */
static void
load_typcache_tupdesc(TypeCacheEntry *typentry)
{
	Relation	rel;

	/* A composite type must have an associated relation row type */
	if (!OidIsValid(typentry->typrelid))	/* should not happen */
		elog(ERROR, "invalid typrelid for composite type %u",
			 typentry->type_id);
	/* AccessShareLock suffices; we only read the relation's tupdesc */
	rel = relation_open(typentry->typrelid, AccessShareLock);
	Assert(rel->rd_rel->reltype == typentry->type_id);

	/*
	 * Link to the tupdesc and increment its refcount (we assert it's a
	 * refcounted descriptor).  We don't use IncrTupleDescRefCount() for this,
	 * because the reference mustn't be entered in the current resource owner;
	 * it can outlive the current query.
	 */
	typentry->tupDesc = RelationGetDescr(rel);

	Assert(typentry->tupDesc->tdrefcount > 0);
	typentry->tupDesc->tdrefcount++;

	/*
	 * In future, we could take some pains to not change tupDesc_identifier if
	 * the tupdesc didn't really change; but for now it's not worth it.
	 */
	typentry->tupDesc_identifier = ++tupledesc_id_counter;

	relation_close(rel, AccessShareLock);
}
1001 :
1002 : /*
1003 : * load_rangetype_info --- helper routine to set up range type information
1004 : */
static void
load_rangetype_info(TypeCacheEntry *typentry)
{
	Form_pg_range pg_range;
	HeapTuple	tup;
	Oid			subtypeOid;
	Oid			opclassOid;
	Oid			canonicalOid;
	Oid			subdiffOid;
	Oid			opfamilyOid;
	Oid			opcintype;
	Oid			cmpFnOid;

	/* get information from pg_range */
	tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id));
	/* should not fail, since we already checked typtype ... */
	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for range type %u",
			 typentry->type_id);
	pg_range = (Form_pg_range) GETSTRUCT(tup);

	/* copy out the fields we need before releasing the syscache tuple */
	subtypeOid = pg_range->rngsubtype;
	typentry->rng_collation = pg_range->rngcollation;
	opclassOid = pg_range->rngsubopc;
	canonicalOid = pg_range->rngcanonical;
	subdiffOid = pg_range->rngsubdiff;

	ReleaseSysCache(tup);

	/* get opclass properties and look up the comparison function */
	opfamilyOid = get_opclass_family(opclassOid);
	opcintype = get_opclass_input_type(opclassOid);
	typentry->rng_opfamily = opfamilyOid;

	cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
								 BTORDER_PROC);
	if (!RegProcedureIsValid(cmpFnOid))
		elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
			 BTORDER_PROC, opcintype, opcintype, opfamilyOid);

	/* set up cached fmgrinfo structs */
	fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
				  CacheMemoryContext);
	/* canonical and subdiff support functions are optional for a range type */
	if (OidIsValid(canonicalOid))
		fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
					  CacheMemoryContext);
	if (OidIsValid(subdiffOid))
		fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
					  CacheMemoryContext);

	/* Lastly, set up link to the element type --- this marks data valid */
	typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
}
1058 :
1059 : /*
1060 : * load_multirangetype_info --- helper routine to set up multirange type
1061 : * information
1062 : */
1063 : static void
1064 131 : load_multirangetype_info(TypeCacheEntry *typentry)
1065 : {
1066 : Oid rangetypeOid;
1067 :
1068 131 : rangetypeOid = get_multirange_range(typentry->type_id);
1069 131 : if (!OidIsValid(rangetypeOid))
1070 0 : elog(ERROR, "cache lookup failed for multirange type %u",
1071 : typentry->type_id);
1072 :
1073 131 : typentry->rngtype = lookup_type_cache(rangetypeOid, TYPECACHE_RANGE_INFO);
1074 131 : }
1075 :
1076 : /*
1077 : * load_domaintype_info --- helper routine to set up domain constraint info
1078 : *
1079 : * Note: we assume we're called in a relatively short-lived context, so it's
1080 : * okay to leak data into the current context while scanning pg_constraint.
1081 : * We build the new DomainConstraintCache data in a context underneath
1082 : * CurrentMemoryContext, and reparent it under CacheMemoryContext when
1083 : * complete.
1084 : */
static void
load_domaintype_info(TypeCacheEntry *typentry)
{
	Oid			typeOid = typentry->type_id;
	DomainConstraintCache *dcc;
	bool		notNull = false;
	DomainConstraintState **ccons;
	int			cconslen;
	Relation	conRel;
	MemoryContext oldcxt;

	/*
	 * If we're here, any existing constraint info is stale, so release it.
	 * For safety, be sure to null the link before trying to delete the data.
	 */
	if (typentry->domainData)
	{
		dcc = typentry->domainData;
		typentry->domainData = NULL;
		decr_dcc_refcount(dcc);
	}

	/*
	 * We try to optimize the common case of no domain constraints, so don't
	 * create the dcc object and context until we find a constraint.  Likewise
	 * for the temp sorting array.
	 */
	dcc = NULL;
	ccons = NULL;
	cconslen = 0;

	/*
	 * Scan pg_constraint for relevant constraints.  We want to find
	 * constraints for not just this domain, but any ancestor domains, so the
	 * outer loop crawls up the domain stack.
	 */
	conRel = table_open(ConstraintRelationId, AccessShareLock);

	for (;;)
	{
		HeapTuple	tup;
		HeapTuple	conTup;
		Form_pg_type typTup;
		int			nccons = 0;	/* CHECK constraints found at this level */
		ScanKeyData key[1];
		SysScanDesc scan;

		tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
		if (!HeapTupleIsValid(tup))
			elog(ERROR, "cache lookup failed for type %u", typeOid);
		typTup = (Form_pg_type) GETSTRUCT(tup);

		if (typTup->typtype != TYPTYPE_DOMAIN)
		{
			/* Not a domain, so done */
			ReleaseSysCache(tup);
			break;
		}

		/* Test for NOT NULL Constraint */
		if (typTup->typnotnull)
			notNull = true;

		/* Look for CHECK Constraints on this domain */
		ScanKeyInit(&key[0],
					Anum_pg_constraint_contypid,
					BTEqualStrategyNumber, F_OIDEQ,
					ObjectIdGetDatum(typeOid));

		scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
								  NULL, 1, key);

		while (HeapTupleIsValid(conTup = systable_getnext(scan)))
		{
			Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
			Datum		val;
			bool		isNull;
			char	   *constring;
			Expr	   *check_expr;
			DomainConstraintState *r;

			/* Ignore non-CHECK constraints */
			if (c->contype != CONSTRAINT_CHECK)
				continue;

			/* Not expecting conbin to be NULL, but we'll test for it anyway */
			val = fastgetattr(conTup, Anum_pg_constraint_conbin,
							  conRel->rd_att, &isNull);
			if (isNull)
				elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
					 NameStr(typTup->typname), NameStr(c->conname));

			/* Create the DomainConstraintCache object and context if needed */
			if (dcc == NULL)
			{
				MemoryContext cxt;

				cxt = AllocSetContextCreate(CurrentMemoryContext,
											"Domain constraints",
											ALLOCSET_SMALL_SIZES);
				dcc = (DomainConstraintCache *)
					MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
				dcc->constraints = NIL;
				dcc->dccContext = cxt;
				dcc->dccRefCount = 0;
			}

			/* Convert conbin to a node tree, still in caller's context */
			constring = TextDatumGetCString(val);
			check_expr = (Expr *) stringToNode(constring);

			/*
			 * Plan the expression, since ExecInitExpr will expect that.
			 *
			 * Note: caching the result of expression_planner() is not very
			 * good practice.  Ideally we'd use a CachedExpression here so
			 * that we would react promptly to, eg, changes in inlined
			 * functions.  However, because we don't support mutable domain
			 * CHECK constraints, it's not really clear that it's worth the
			 * extra overhead to do that.
			 */
			check_expr = expression_planner(check_expr);

			/* Create only the minimally needed stuff in dccContext */
			oldcxt = MemoryContextSwitchTo(dcc->dccContext);

			r = makeNode(DomainConstraintState);
			r->constrainttype = DOM_CONSTRAINT_CHECK;
			r->name = pstrdup(NameStr(c->conname));
			r->check_expr = copyObject(check_expr);
			r->check_exprstate = NULL;

			MemoryContextSwitchTo(oldcxt);

			/* Accumulate constraints in an array, for sorting below */
			if (ccons == NULL)
			{
				cconslen = 8;
				ccons = (DomainConstraintState **)
					palloc(cconslen * sizeof(DomainConstraintState *));
			}
			else if (nccons >= cconslen)
			{
				cconslen *= 2;
				ccons = (DomainConstraintState **)
					repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
			}
			ccons[nccons++] = r;
		}

		systable_endscan(scan);

		if (nccons > 0)
		{
			/*
			 * Sort the items for this domain, so that CHECKs are applied in a
			 * deterministic order.
			 */
			if (nccons > 1)
				qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);

			/*
			 * Now attach them to the overall list.  Use lcons() here because
			 * constraints of parent domains should be applied earlier.
			 */
			oldcxt = MemoryContextSwitchTo(dcc->dccContext);
			while (nccons > 0)
				dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
			MemoryContextSwitchTo(oldcxt);
		}

		/* loop to next domain in stack */
		typeOid = typTup->typbasetype;
		ReleaseSysCache(tup);
	}

	table_close(conRel, AccessShareLock);

	/*
	 * Only need to add one NOT NULL check regardless of how many domains in
	 * the stack request it.
	 */
	if (notNull)
	{
		DomainConstraintState *r;

		/* Create the DomainConstraintCache object and context if needed */
		if (dcc == NULL)
		{
			MemoryContext cxt;

			cxt = AllocSetContextCreate(CurrentMemoryContext,
										"Domain constraints",
										ALLOCSET_SMALL_SIZES);
			dcc = (DomainConstraintCache *)
				MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
			dcc->constraints = NIL;
			dcc->dccContext = cxt;
			dcc->dccRefCount = 0;
		}

		/* Create node trees in DomainConstraintCache's context */
		oldcxt = MemoryContextSwitchTo(dcc->dccContext);

		r = makeNode(DomainConstraintState);

		r->constrainttype = DOM_CONSTRAINT_NOTNULL;
		r->name = pstrdup("NOT NULL");
		r->check_expr = NULL;
		r->check_exprstate = NULL;

		/* lcons to apply the nullness check FIRST */
		dcc->constraints = lcons(r, dcc->constraints);

		MemoryContextSwitchTo(oldcxt);
	}

	/*
	 * If we made a constraint object, move it into CacheMemoryContext and
	 * attach it to the typcache entry.
	 */
	if (dcc)
	{
		MemoryContextSetParent(dcc->dccContext, CacheMemoryContext);
		typentry->domainData = dcc;
		dcc->dccRefCount++;		/* count the typcache's reference */
	}

	/* Either way, the typcache entry's domain data is now valid. */
	typentry->flags |= TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
}
1316 :
1317 : /*
1318 : * qsort comparator to sort DomainConstraintState pointers by name
1319 : */
1320 : static int
1321 10 : dcs_cmp(const void *a, const void *b)
1322 : {
1323 10 : const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1324 10 : const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1325 :
1326 10 : return strcmp((*ca)->name, (*cb)->name);
1327 : }
1328 :
1329 : /*
1330 : * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
1331 : * and free it if no references remain
1332 : */
1333 : static void
1334 7697 : decr_dcc_refcount(DomainConstraintCache *dcc)
1335 : {
1336 : Assert(dcc->dccRefCount > 0);
1337 7697 : if (--(dcc->dccRefCount) <= 0)
1338 514 : MemoryContextDelete(dcc->dccContext);
1339 7697 : }
1340 :
1341 : /*
1342 : * Context reset/delete callback for a DomainConstraintRef
1343 : */
1344 : static void
1345 7574 : dccref_deletion_callback(void *arg)
1346 : {
1347 7574 : DomainConstraintRef *ref = (DomainConstraintRef *) arg;
1348 7574 : DomainConstraintCache *dcc = ref->dcc;
1349 :
1350 : /* Paranoia --- be sure link is nulled before trying to release */
1351 7574 : if (dcc)
1352 : {
1353 7181 : ref->constraints = NIL;
1354 7181 : ref->dcc = NULL;
1355 7181 : decr_dcc_refcount(dcc);
1356 : }
1357 7574 : }
1358 :
1359 : /*
1360 : * prep_domain_constraints --- prepare domain constraints for execution
1361 : *
1362 : * The expression trees stored in the DomainConstraintCache's list are
1363 : * converted to executable expression state trees stored in execctx.
1364 : */
1365 : static List *
1366 1689 : prep_domain_constraints(List *constraints, MemoryContext execctx)
1367 : {
1368 1689 : List *result = NIL;
1369 : MemoryContext oldcxt;
1370 : ListCell *lc;
1371 :
1372 1689 : oldcxt = MemoryContextSwitchTo(execctx);
1373 :
1374 3414 : foreach(lc, constraints)
1375 : {
1376 1725 : DomainConstraintState *r = (DomainConstraintState *) lfirst(lc);
1377 : DomainConstraintState *newr;
1378 :
1379 1725 : newr = makeNode(DomainConstraintState);
1380 1725 : newr->constrainttype = r->constrainttype;
1381 1725 : newr->name = r->name;
1382 1725 : newr->check_expr = r->check_expr;
1383 1725 : newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1384 :
1385 1725 : result = lappend(result, newr);
1386 : }
1387 :
1388 1689 : MemoryContextSwitchTo(oldcxt);
1389 :
1390 1689 : return result;
1391 : }
1392 :
1393 : /*
1394 : * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
1395 : *
1396 : * Caller must tell us the MemoryContext in which the DomainConstraintRef
1397 : * lives. The ref will be cleaned up when that context is reset/deleted.
1398 : *
1399 : * Caller must also tell us whether it wants check_exprstate fields to be
1400 : * computed in the DomainConstraintState nodes attached to this ref.
1401 : * If it doesn't, we need not make a copy of the DomainConstraintState list.
1402 : */
void
InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref,
						MemoryContext refctx, bool need_exprstate)
{
	/* Look up the typcache entry --- we assume it survives indefinitely */
	ref->tcache = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
	ref->need_exprstate = need_exprstate;
	/* For safety, establish the callback before acquiring a refcount */
	ref->refctx = refctx;
	ref->dcc = NULL;
	ref->callback.func = dccref_deletion_callback;
	ref->callback.arg = ref;
	MemoryContextRegisterResetCallback(refctx, &ref->callback);
	/* Acquire refcount if there are constraints, and set up exported list */
	if (ref->tcache->domainData)
	{
		ref->dcc = ref->tcache->domainData;
		ref->dcc->dccRefCount++;
		if (ref->need_exprstate)
			/* caller wants executable expression states; make private copies */
			ref->constraints = prep_domain_constraints(ref->dcc->constraints,
													   ref->refctx);
		else
			/* otherwise we can just expose the cached list directly */
			ref->constraints = ref->dcc->constraints;
	}
	else
		ref->constraints = NIL;
}
1430 :
1431 : /*
1432 : * UpdateDomainConstraintRef --- recheck validity of domain constraint info
1433 : *
1434 : * If the domain's constraint set changed, ref->constraints is updated to
1435 : * point at a new list of cached constraints.
1436 : *
1437 : * In the normal case where nothing happened to the domain, this is cheap
1438 : * enough that it's reasonable (and expected) to check before *each* use
1439 : * of the constraint info.
1440 : */
void
UpdateDomainConstraintRef(DomainConstraintRef *ref)
{
	TypeCacheEntry *typentry = ref->tcache;

	/* Make sure typcache entry's data is up to date */
	if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
		typentry->typtype == TYPTYPE_DOMAIN)
		load_domaintype_info(typentry);

	/* Transfer to ref object if there's new info, adjusting refcounts */
	if (ref->dcc != typentry->domainData)
	{
		/* Paranoia --- be sure link is nulled before trying to release */
		DomainConstraintCache *dcc = ref->dcc;

		if (dcc)
		{
			/*
			 * Note: we just leak the previous list of executable domain
			 * constraints.  Alternatively, we could keep those in a child
			 * context of ref->refctx and free that context at this point.
			 * However, in practice this code path will be taken so seldom
			 * that the extra bookkeeping for a child context doesn't seem
			 * worthwhile; we'll just allow a leak for the lifespan of refctx.
			 */
			ref->constraints = NIL;
			ref->dcc = NULL;
			decr_dcc_refcount(dcc);
		}
		/* Now acquire a reference to the current constraint data, if any */
		dcc = typentry->domainData;
		if (dcc)
		{
			ref->dcc = dcc;
			dcc->dccRefCount++;
			if (ref->need_exprstate)
				ref->constraints = prep_domain_constraints(dcc->constraints,
														   ref->refctx);
			else
				ref->constraints = dcc->constraints;
		}
	}
}
1484 :
1485 : /*
1486 : * DomainHasConstraints --- utility routine to check if a domain has constraints
1487 : *
1488 : * Returns true if the domain has any constraints at all. If has_volatile
1489 : * is not NULL, also checks whether any CHECK constraint contains a volatile
1490 : * expression and sets *has_volatile accordingly.
1491 : *
1492 : * This is defined to return false, not fail, if type is not a domain.
1493 : */
1494 : bool
1495 18953 : DomainHasConstraints(Oid type_id, bool *has_volatile)
1496 : {
1497 : TypeCacheEntry *typentry;
1498 :
1499 : /*
1500 : * Note: a side effect is to cause the typcache's domain data to become
1501 : * valid. This is fine since we'll likely need it soon if there is any.
1502 : */
1503 18953 : typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
1504 :
1505 18953 : if (typentry->domainData == NULL)
1506 13050 : return false;
1507 :
1508 5903 : if (has_volatile)
1509 : {
1510 56 : *has_volatile = false;
1511 :
1512 160 : foreach_node(DomainConstraintState, constrstate,
1513 : typentry->domainData->constraints)
1514 : {
1515 108 : if (constrstate->constrainttype == DOM_CONSTRAINT_CHECK &&
1516 48 : contain_volatile_functions((Node *) constrstate->check_expr))
1517 : {
1518 12 : *has_volatile = true;
1519 12 : break;
1520 : }
1521 : }
1522 : }
1523 :
1524 5903 : return true;
1525 : }
1526 :
1527 :
1528 : /*
1529 : * array_element_has_equality and friends are helper routines to check
1530 : * whether we should believe that array_eq and related functions will work
1531 : * on the given array type or composite type.
1532 : *
1533 : * The logic above may call these repeatedly on the same type entry, so we
1534 : * make use of the typentry->flags field to cache the results once known.
1535 : * Also, we assume that we'll probably want all these facts about the type
1536 : * if we want any, so we cache them all using only one lookup of the
1537 : * component datatype(s).
1538 : */
1539 :
1540 : static bool
1541 1786 : array_element_has_equality(TypeCacheEntry *typentry)
1542 : {
1543 1786 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1544 1513 : cache_array_element_properties(typentry);
1545 1786 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1546 : }
1547 :
1548 : static bool
1549 3139 : array_element_has_compare(TypeCacheEntry *typentry)
1550 : {
1551 3139 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1552 301 : cache_array_element_properties(typentry);
1553 3139 : return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1554 : }
1555 :
1556 : static bool
1557 1286 : array_element_has_hashing(TypeCacheEntry *typentry)
1558 : {
1559 1286 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1560 0 : cache_array_element_properties(typentry);
1561 1286 : return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1562 : }
1563 :
1564 : static bool
1565 272 : array_element_has_extended_hashing(TypeCacheEntry *typentry)
1566 : {
1567 272 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1568 0 : cache_array_element_properties(typentry);
1569 272 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1570 : }
1571 :
1572 : static void
1573 1814 : cache_array_element_properties(TypeCacheEntry *typentry)
1574 : {
1575 1814 : Oid elem_type = get_base_element_type(typentry->type_id);
1576 :
1577 1814 : if (OidIsValid(elem_type))
1578 : {
1579 : TypeCacheEntry *elementry;
1580 :
1581 1670 : elementry = lookup_type_cache(elem_type,
1582 : TYPECACHE_EQ_OPR |
1583 : TYPECACHE_CMP_PROC |
1584 : TYPECACHE_HASH_PROC |
1585 : TYPECACHE_HASH_EXTENDED_PROC);
1586 1670 : if (OidIsValid(elementry->eq_opr))
1587 1537 : typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1588 1670 : if (OidIsValid(elementry->cmp_proc))
1589 1432 : typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1590 1670 : if (OidIsValid(elementry->hash_proc))
1591 1529 : typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1592 1670 : if (OidIsValid(elementry->hash_extended_proc))
1593 1529 : typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1594 : }
1595 1814 : typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1596 1814 : }
1597 :
1598 : /*
1599 : * Likewise, some helper functions for composite types.
1600 : */
1601 :
1602 : static bool
1603 299 : record_fields_have_equality(TypeCacheEntry *typentry)
1604 : {
1605 299 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1606 277 : cache_record_field_properties(typentry);
1607 299 : return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1608 : }
1609 :
1610 : static bool
1611 348 : record_fields_have_compare(TypeCacheEntry *typentry)
1612 : {
1613 348 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1614 43 : cache_record_field_properties(typentry);
1615 348 : return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1616 : }
1617 :
1618 : static bool
1619 285 : record_fields_have_hashing(TypeCacheEntry *typentry)
1620 : {
1621 285 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1622 4 : cache_record_field_properties(typentry);
1623 285 : return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
1624 : }
1625 :
1626 : static bool
1627 141 : record_fields_have_extended_hashing(TypeCacheEntry *typentry)
1628 : {
1629 141 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1630 0 : cache_record_field_properties(typentry);
1631 141 : return (typentry->flags & TCFLAGS_HAVE_FIELD_EXTENDED_HASHING) != 0;
1632 : }
1633 :
static void
cache_record_field_properties(TypeCacheEntry *typentry)
{
	/*
	 * For type RECORD, we can't really tell what will work, since we don't
	 * have access here to the specific anonymous type.  Just assume that
	 * equality and comparison will (we may get a failure at runtime).  We
	 * could also claim that hashing works, but then if code that has the
	 * option between a comparison-based (sort-based) and a hash-based plan
	 * chooses hashing, stuff could fail that would otherwise work if it chose
	 * a comparison-based plan.  In practice more types support comparison
	 * than hashing.
	 */
	if (typentry->type_id == RECORDOID)
	{
		typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
							TCFLAGS_HAVE_FIELD_COMPARE);
	}
	else if (typentry->typtype == TYPTYPE_COMPOSITE)
	{
		TupleDesc	tupdesc;
		int			newflags;
		int			i;

		/* Fetch composite type's tupdesc if we don't have it already */
		if (typentry->tupDesc == NULL)
			load_typcache_tupdesc(typentry);
		tupdesc = typentry->tupDesc;

		/* Must bump the refcount while we do additional catalog lookups */
		IncrTupleDescRefCount(tupdesc);

		/* Have each property if all non-dropped fields have the property */
		newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
					TCFLAGS_HAVE_FIELD_COMPARE |
					TCFLAGS_HAVE_FIELD_HASHING |
					TCFLAGS_HAVE_FIELD_EXTENDED_HASHING);
		for (i = 0; i < tupdesc->natts; i++)
		{
			TypeCacheEntry *fieldentry;
			Form_pg_attribute attr = TupleDescAttr(tupdesc, i);

			/* Dropped columns don't count against any property */
			if (attr->attisdropped)
				continue;

			fieldentry = lookup_type_cache(attr->atttypid,
										   TYPECACHE_EQ_OPR |
										   TYPECACHE_CMP_PROC |
										   TYPECACHE_HASH_PROC |
										   TYPECACHE_HASH_EXTENDED_PROC);
			if (!OidIsValid(fieldentry->eq_opr))
				newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
			if (!OidIsValid(fieldentry->cmp_proc))
				newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
			if (!OidIsValid(fieldentry->hash_proc))
				newflags &= ~TCFLAGS_HAVE_FIELD_HASHING;
			if (!OidIsValid(fieldentry->hash_extended_proc))
				newflags &= ~TCFLAGS_HAVE_FIELD_EXTENDED_HASHING;

			/* We can drop out of the loop once we disprove all bits */
			if (newflags == 0)
				break;
		}
		typentry->flags |= newflags;

		DecrTupleDescRefCount(tupdesc);
	}
	else if (typentry->typtype == TYPTYPE_DOMAIN)
	{
		/* If it's domain over composite, copy base type's properties */
		TypeCacheEntry *baseentry;

		/* load up basetype info if we didn't already */
		if (typentry->domainBaseType == InvalidOid)
		{
			typentry->domainBaseTypmod = -1;
			typentry->domainBaseType =
				getBaseTypeAndTypmod(typentry->type_id,
									 &typentry->domainBaseTypmod);
		}
		baseentry = lookup_type_cache(typentry->domainBaseType,
									  TYPECACHE_EQ_OPR |
									  TYPECACHE_CMP_PROC |
									  TYPECACHE_HASH_PROC |
									  TYPECACHE_HASH_EXTENDED_PROC);
		if (baseentry->typtype == TYPTYPE_COMPOSITE)
		{
			typentry->flags |= TCFLAGS_DOMAIN_BASE_IS_COMPOSITE;
			typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
												   TCFLAGS_HAVE_FIELD_COMPARE |
												   TCFLAGS_HAVE_FIELD_HASHING |
												   TCFLAGS_HAVE_FIELD_EXTENDED_HASHING);
		}
	}
	/* Mark the check done regardless of which branch we took */
	typentry->flags |= TCFLAGS_CHECKED_FIELD_PROPERTIES;
}
1730 :
1731 : /*
1732 : * Likewise, some helper functions for range and multirange types.
1733 : *
1734 : * We can borrow the flag bits for array element properties to use for range
1735 : * element properties, since those flag bits otherwise have no use in a
1736 : * range or multirange type's typcache entry.
1737 : */
1738 :
1739 : static bool
1740 65 : range_element_has_hashing(TypeCacheEntry *typentry)
1741 : {
1742 65 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1743 65 : cache_range_element_properties(typentry);
1744 65 : return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1745 : }
1746 :
1747 : static bool
1748 0 : range_element_has_extended_hashing(TypeCacheEntry *typentry)
1749 : {
1750 0 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1751 0 : cache_range_element_properties(typentry);
1752 0 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1753 : }
1754 :
1755 : static void
1756 65 : cache_range_element_properties(TypeCacheEntry *typentry)
1757 : {
1758 : /* load up subtype link if we didn't already */
1759 65 : if (typentry->rngelemtype == NULL &&
1760 44 : typentry->typtype == TYPTYPE_RANGE)
1761 44 : load_rangetype_info(typentry);
1762 :
1763 65 : if (typentry->rngelemtype != NULL)
1764 : {
1765 : TypeCacheEntry *elementry;
1766 :
1767 : /* might need to calculate subtype's hash function properties */
1768 65 : elementry = lookup_type_cache(typentry->rngelemtype->type_id,
1769 : TYPECACHE_HASH_PROC |
1770 : TYPECACHE_HASH_EXTENDED_PROC);
1771 65 : if (OidIsValid(elementry->hash_proc))
1772 61 : typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1773 65 : if (OidIsValid(elementry->hash_extended_proc))
1774 61 : typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1775 : }
1776 65 : typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1777 65 : }
1778 :
1779 : static bool
1780 12 : multirange_element_has_hashing(TypeCacheEntry *typentry)
1781 : {
1782 12 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1783 12 : cache_multirange_element_properties(typentry);
1784 12 : return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1785 : }
1786 :
1787 : static bool
1788 0 : multirange_element_has_extended_hashing(TypeCacheEntry *typentry)
1789 : {
1790 0 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1791 0 : cache_multirange_element_properties(typentry);
1792 0 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1793 : }
1794 :
1795 : static void
1796 12 : cache_multirange_element_properties(TypeCacheEntry *typentry)
1797 : {
1798 : /* load up range link if we didn't already */
1799 12 : if (typentry->rngtype == NULL &&
1800 0 : typentry->typtype == TYPTYPE_MULTIRANGE)
1801 0 : load_multirangetype_info(typentry);
1802 :
1803 12 : if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
1804 : {
1805 : TypeCacheEntry *elementry;
1806 :
1807 : /* might need to calculate subtype's hash function properties */
1808 12 : elementry = lookup_type_cache(typentry->rngtype->rngelemtype->type_id,
1809 : TYPECACHE_HASH_PROC |
1810 : TYPECACHE_HASH_EXTENDED_PROC);
1811 12 : if (OidIsValid(elementry->hash_proc))
1812 8 : typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1813 12 : if (OidIsValid(elementry->hash_extended_proc))
1814 8 : typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1815 : }
1816 12 : typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1817 12 : }
1818 :
1819 : /*
1820 : * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
1821 : * to store 'typmod'.
1822 : */
1823 : static void
1824 9761 : ensure_record_cache_typmod_slot_exists(int32 typmod)
1825 : {
1826 9761 : if (RecordCacheArray == NULL)
1827 : {
1828 3774 : RecordCacheArray = (RecordCacheArrayEntry *)
1829 3774 : MemoryContextAllocZero(CacheMemoryContext,
1830 : 64 * sizeof(RecordCacheArrayEntry));
1831 3774 : RecordCacheArrayLen = 64;
1832 : }
1833 :
1834 9761 : if (typmod >= RecordCacheArrayLen)
1835 : {
1836 0 : int32 newlen = pg_nextpower2_32(typmod + 1);
1837 :
1838 0 : RecordCacheArray = repalloc0_array(RecordCacheArray,
1839 : RecordCacheArrayEntry,
1840 : RecordCacheArrayLen,
1841 : newlen);
1842 0 : RecordCacheArrayLen = newlen;
1843 : }
1844 9761 : }
1845 :
1846 : /*
1847 : * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1848 : *
1849 : * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1850 : * hasn't had its refcount bumped.
1851 : */
static TupleDesc
lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
{
	if (type_id != RECORDOID)
	{
		/*
		 * It's a named composite type, so use the regular typcache.
		 */
		TypeCacheEntry *typentry;

		typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
		if (typentry->tupDesc == NULL && !noError)
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("type %s is not composite",
							format_type_be(type_id))));
		/* May be NULL if noError; caller must cope. */
		return typentry->tupDesc;
	}
	else
	{
		/*
		 * It's a transient record type, so look in our record-type table.
		 */
		if (typmod >= 0)
		{
			/* Is it already in our local cache? */
			if (typmod < RecordCacheArrayLen &&
				RecordCacheArray[typmod].tupdesc != NULL)
				return RecordCacheArray[typmod].tupdesc;

			/* Are we attached to a shared record typmod registry? */
			if (CurrentSession->shared_typmod_registry != NULL)
			{
				SharedTypmodTableEntry *entry;

				/* Try to find it in the shared typmod index. */
				entry = dshash_find(CurrentSession->shared_typmod_table,
									&typmod, false);
				if (entry != NULL)
				{
					TupleDesc	tupdesc;

					tupdesc = (TupleDesc)
						dsa_get_address(CurrentSession->area,
										entry->shared_tupdesc);
					Assert(typmod == tupdesc->tdtypmod);

					/* We may need to extend the local RecordCacheArray. */
					ensure_record_cache_typmod_slot_exists(typmod);

					/*
					 * Our local array can now point directly to the TupleDesc
					 * in shared memory, which is non-reference-counted.
					 */
					RecordCacheArray[typmod].tupdesc = tupdesc;
					Assert(tupdesc->tdrefcount == -1);

					/*
					 * We don't share tupdesc identifiers across processes, so
					 * assign one locally.
					 */
					RecordCacheArray[typmod].id = ++tupledesc_id_counter;

					/* Release the lock taken by dshash_find above. */
					dshash_release_lock(CurrentSession->shared_typmod_table,
										entry);

					return RecordCacheArray[typmod].tupdesc;
				}
			}
		}

		/* Negative typmod, or typmod not known locally or in the registry. */
		if (!noError)
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("record type has not been registered")));
		return NULL;
	}
}
1930 :
1931 : /*
1932 : * lookup_rowtype_tupdesc
1933 : *
1934 : * Given a typeid/typmod that should describe a known composite type,
1935 : * return the tuple descriptor for the type. Will ereport on failure.
1936 : * (Use ereport because this is reachable with user-specified OIDs,
1937 : * for example from record_in().)
1938 : *
1939 : * Note: on success, we increment the refcount of the returned TupleDesc,
1940 : * and log the reference in CurrentResourceOwner. Caller must call
1941 : * ReleaseTupleDesc when done using the tupdesc. (There are some
1942 : * cases in which the returned tupdesc is not refcounted, in which
1943 : * case PinTupleDesc/ReleaseTupleDesc are no-ops; but in these cases
1944 : * the tupdesc is guaranteed to live till process exit.)
1945 : */
1946 : TupleDesc
1947 47391 : lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
1948 : {
1949 : TupleDesc tupDesc;
1950 :
1951 47391 : tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1952 47390 : PinTupleDesc(tupDesc);
1953 47390 : return tupDesc;
1954 : }
1955 :
1956 : /*
1957 : * lookup_rowtype_tupdesc_noerror
1958 : *
1959 : * As above, but if the type is not a known composite type and noError
1960 : * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1961 : * type_id is passed, you'll get an ereport anyway.)
1962 : */
1963 : TupleDesc
1964 12 : lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1965 : {
1966 : TupleDesc tupDesc;
1967 :
1968 12 : tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1969 12 : if (tupDesc != NULL)
1970 12 : PinTupleDesc(tupDesc);
1971 12 : return tupDesc;
1972 : }
1973 :
1974 : /*
1975 : * lookup_rowtype_tupdesc_copy
1976 : *
1977 : * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1978 : * copied into the CurrentMemoryContext and is not reference-counted.
1979 : */
1980 : TupleDesc
1981 42950 : lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
1982 : {
1983 : TupleDesc tmp;
1984 :
1985 42950 : tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1986 42950 : return CreateTupleDescCopyConstr(tmp);
1987 : }
1988 :
1989 : /*
1990 : * lookup_rowtype_tupdesc_domain
1991 : *
1992 : * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
1993 : * a domain over a named composite type; so this is effectively equivalent to
1994 : * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
1995 : * except for being a tad faster.
1996 : *
1997 : * Note: the reason we don't fold the look-through-domain behavior into plain
1998 : * lookup_rowtype_tupdesc() is that we want callers to know they might be
1999 : * dealing with a domain. Otherwise they might construct a tuple that should
2000 : * be of the domain type, but not apply domain constraints.
2001 : */
TupleDesc
lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
{
	TupleDesc	tupDesc;

	if (type_id != RECORDOID)
	{
		/*
		 * Check for domain or named composite type. We might as well load
		 * whichever data is needed.
		 */
		TypeCacheEntry *typentry;

		typentry = lookup_type_cache(type_id,
									 TYPECACHE_TUPDESC |
									 TYPECACHE_DOMAIN_BASE_INFO);
		if (typentry->typtype == TYPTYPE_DOMAIN)
			/* The recursive call takes care of pinning the result. */
			return lookup_rowtype_tupdesc_noerror(typentry->domainBaseType,
												  typentry->domainBaseTypmod,
												  noError);
		if (typentry->tupDesc == NULL && !noError)
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("type %s is not composite",
							format_type_be(type_id))));
		tupDesc = typentry->tupDesc;
	}
	else
		tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
	/* Pin before returning, matching lookup_rowtype_tupdesc_noerror. */
	if (tupDesc != NULL)
		PinTupleDesc(tupDesc);
	return tupDesc;
}
2035 :
2036 : /*
2037 : * Hash function for the hash table of RecordCacheEntry.
2038 : */
2039 : static uint32
2040 284620 : record_type_typmod_hash(const void *data, size_t size)
2041 : {
2042 284620 : const RecordCacheEntry *entry = data;
2043 :
2044 284620 : return hashRowType(entry->tupdesc);
2045 : }
2046 :
2047 : /*
2048 : * Match function for the hash table of RecordCacheEntry.
2049 : */
2050 : static int
2051 270652 : record_type_typmod_compare(const void *a, const void *b, size_t size)
2052 : {
2053 270652 : const RecordCacheEntry *left = a;
2054 270652 : const RecordCacheEntry *right = b;
2055 :
2056 270652 : return equalRowTypes(left->tupdesc, right->tupdesc) ? 0 : 1;
2057 : }
2058 :
2059 : /*
2060 : * assign_record_type_typmod
2061 : *
2062 : * Given a tuple descriptor for a RECORD type, find or create a cache entry
2063 : * for the type, and set the tupdesc's tdtypmod field to a value that will
2064 : * identify this cache entry to lookup_rowtype_tupdesc.
2065 : */
void
assign_record_type_typmod(TupleDesc tupDesc)
{
	RecordCacheEntry *recentry;
	TupleDesc	entDesc;
	bool		found;
	MemoryContext oldcxt;

	Assert(tupDesc->tdtypeid == RECORDOID);

	if (RecordCacheHash == NULL)
	{
		/* First time through: initialize the hash table */
		HASHCTL		ctl;

		ctl.keysize = sizeof(TupleDesc);	/* just the pointer */
		ctl.entrysize = sizeof(RecordCacheEntry);
		ctl.hash = record_type_typmod_hash;
		ctl.match = record_type_typmod_compare;
		RecordCacheHash = hash_create("Record information cache", 64,
									  &ctl,
									  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);

		/* Also make sure CacheMemoryContext exists */
		if (!CacheMemoryContext)
			CreateCacheMemoryContext();
	}

	/*
	 * Find a hashtable entry for this tuple descriptor. We don't use
	 * HASH_ENTER yet, because if it's missing, we need to make sure that all
	 * the allocations succeed before we create the new entry.
	 */
	recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
												&tupDesc,
												HASH_FIND, &found);
	if (found && recentry->tupdesc != NULL)
	{
		/* Already registered; just copy the assigned typmod to the caller. */
		tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
		return;
	}

	/* Not present, so need to manufacture an entry */
	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/* Look in the SharedRecordTypmodRegistry, if attached */
	entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
	if (entDesc == NULL)
	{
		/*
		 * Make sure we have room before we CreateTupleDescCopy() or advance
		 * NextRecordTypmod.
		 */
		ensure_record_cache_typmod_slot_exists(NextRecordTypmod);

		/* Reference-counted local cache only. */
		entDesc = CreateTupleDescCopy(tupDesc);
		entDesc->tdrefcount = 1;
		entDesc->tdtypmod = NextRecordTypmod++;
	}
	else
	{
		/* Shared registry supplied the typmod; make room for that slot. */
		ensure_record_cache_typmod_slot_exists(entDesc->tdtypmod);
	}

	RecordCacheArray[entDesc->tdtypmod].tupdesc = entDesc;

	/* Assign a unique tupdesc identifier, too. */
	RecordCacheArray[entDesc->tdtypmod].id = ++tupledesc_id_counter;

	/* Fully initialized; create the hash table entry */
	recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
												&tupDesc,
												HASH_ENTER, NULL);
	recentry->tupdesc = entDesc;

	/* Update the caller's tuple descriptor. */
	tupDesc->tdtypmod = entDesc->tdtypmod;

	MemoryContextSwitchTo(oldcxt);
}
2147 :
2148 : /*
2149 : * assign_record_type_identifier
2150 : *
2151 : * Get an identifier, which will be unique over the lifespan of this backend
2152 : * process, for the current tuple descriptor of the specified composite type.
2153 : * For named composite types, the value is guaranteed to change if the type's
2154 : * definition does. For registered RECORD types, the value will not change
2155 : * once assigned, since the registered type won't either. If an anonymous
2156 : * RECORD type is specified, we return a new identifier on each call.
2157 : */
2158 : uint64
2159 3697 : assign_record_type_identifier(Oid type_id, int32 typmod)
2160 : {
2161 3697 : if (type_id != RECORDOID)
2162 : {
2163 : /*
2164 : * It's a named composite type, so use the regular typcache.
2165 : */
2166 : TypeCacheEntry *typentry;
2167 :
2168 0 : typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
2169 0 : if (typentry->tupDesc == NULL)
2170 0 : ereport(ERROR,
2171 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2172 : errmsg("type %s is not composite",
2173 : format_type_be(type_id))));
2174 : Assert(typentry->tupDesc_identifier != 0);
2175 0 : return typentry->tupDesc_identifier;
2176 : }
2177 : else
2178 : {
2179 : /*
2180 : * It's a transient record type, so look in our record-type table.
2181 : */
2182 3697 : if (typmod >= 0 && typmod < RecordCacheArrayLen &&
2183 32 : RecordCacheArray[typmod].tupdesc != NULL)
2184 : {
2185 : Assert(RecordCacheArray[typmod].id != 0);
2186 32 : return RecordCacheArray[typmod].id;
2187 : }
2188 :
2189 : /* For anonymous or unrecognized record type, generate a new ID */
2190 3665 : return ++tupledesc_id_counter;
2191 : }
2192 : }
2193 :
2194 : /*
2195 : * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
2196 : * This exists only to avoid exposing private innards of
2197 : * SharedRecordTypmodRegistry in a header.
2198 : */
2199 : size_t
2200 114 : SharedRecordTypmodRegistryEstimate(void)
2201 : {
2202 114 : return sizeof(SharedRecordTypmodRegistry);
2203 : }
2204 :
2205 : /*
2206 : * Initialize 'registry' in a pre-existing shared memory region, which must be
2207 : * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
2208 : * bytes.
2209 : *
2210 : * 'area' will be used to allocate shared memory space as required for the
2211 : * typemod registration. The current process, expected to be a leader process
2212 : * in a parallel query, will be attached automatically and its current record
2213 : * types will be loaded into *registry. While attached, all calls to
2214 : * assign_record_type_typmod will use the shared registry. Worker backends
2215 : * will need to attach explicitly.
2216 : *
2217 : * Note that this function takes 'area' and 'segment' as arguments rather than
2218 : * accessing them via CurrentSession, because they aren't installed there
2219 : * until after this function runs.
2220 : */
2221 : void
2222 114 : SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry,
2223 : dsm_segment *segment,
2224 : dsa_area *area)
2225 : {
2226 : MemoryContext old_context;
2227 : dshash_table *record_table;
2228 : dshash_table *typmod_table;
2229 : int32 typmod;
2230 :
2231 : Assert(!IsParallelWorker());
2232 :
2233 : /* We can't already be attached to a shared registry. */
2234 : Assert(CurrentSession->shared_typmod_registry == NULL);
2235 : Assert(CurrentSession->shared_record_table == NULL);
2236 : Assert(CurrentSession->shared_typmod_table == NULL);
2237 :
2238 114 : old_context = MemoryContextSwitchTo(TopMemoryContext);
2239 :
2240 : /* Create the hash table of tuple descriptors indexed by themselves. */
2241 114 : record_table = dshash_create(area, &srtr_record_table_params, area);
2242 :
2243 : /* Create the hash table of tuple descriptors indexed by typmod. */
2244 114 : typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);
2245 :
2246 114 : MemoryContextSwitchTo(old_context);
2247 :
2248 : /* Initialize the SharedRecordTypmodRegistry. */
2249 114 : registry->record_table_handle = dshash_get_hash_table_handle(record_table);
2250 114 : registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
2251 114 : pg_atomic_init_u32(®istry->next_typmod, NextRecordTypmod);
2252 :
2253 : /*
2254 : * Copy all entries from this backend's private registry into the shared
2255 : * registry.
2256 : */
2257 289 : for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
2258 : {
2259 : SharedTypmodTableEntry *typmod_table_entry;
2260 : SharedRecordTableEntry *record_table_entry;
2261 : SharedRecordTableKey record_table_key;
2262 : dsa_pointer shared_dp;
2263 : TupleDesc tupdesc;
2264 : bool found;
2265 :
2266 175 : tupdesc = RecordCacheArray[typmod].tupdesc;
2267 175 : if (tupdesc == NULL)
2268 0 : continue;
2269 :
2270 : /* Copy the TupleDesc into shared memory. */
2271 175 : shared_dp = share_tupledesc(area, tupdesc, typmod);
2272 :
2273 : /* Insert into the typmod table. */
2274 175 : typmod_table_entry = dshash_find_or_insert(typmod_table,
2275 : &tupdesc->tdtypmod,
2276 : &found);
2277 175 : if (found)
2278 0 : elog(ERROR, "cannot create duplicate shared record typmod");
2279 175 : typmod_table_entry->typmod = tupdesc->tdtypmod;
2280 175 : typmod_table_entry->shared_tupdesc = shared_dp;
2281 175 : dshash_release_lock(typmod_table, typmod_table_entry);
2282 :
2283 : /* Insert into the record table. */
2284 175 : record_table_key.shared = false;
2285 175 : record_table_key.u.local_tupdesc = tupdesc;
2286 175 : record_table_entry = dshash_find_or_insert(record_table,
2287 : &record_table_key,
2288 : &found);
2289 175 : if (!found)
2290 : {
2291 175 : record_table_entry->key.shared = true;
2292 175 : record_table_entry->key.u.shared_tupdesc = shared_dp;
2293 : }
2294 175 : dshash_release_lock(record_table, record_table_entry);
2295 : }
2296 :
2297 : /*
2298 : * Set up the global state that will tell assign_record_type_typmod and
2299 : * lookup_rowtype_tupdesc_internal about the shared registry.
2300 : */
2301 114 : CurrentSession->shared_record_table = record_table;
2302 114 : CurrentSession->shared_typmod_table = typmod_table;
2303 114 : CurrentSession->shared_typmod_registry = registry;
2304 :
2305 : /*
2306 : * We install a detach hook in the leader, but only to handle cleanup on
2307 : * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
2308 : * the memory, the leader process will use a shared registry until it
2309 : * exits.
2310 : */
2311 114 : on_dsm_detach(segment, shared_record_typmod_registry_detach, (Datum) 0);
2312 114 : }
2313 :
2314 : /*
2315 : * Attach to 'registry', which must have been initialized already by another
2316 : * backend. Future calls to assign_record_type_typmod and
2317 : * lookup_rowtype_tupdesc_internal will use the shared registry until the
2318 : * current session is detached.
2319 : */
void
SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
{
	MemoryContext old_context;
	dshash_table *record_table;
	dshash_table *typmod_table;

	Assert(IsParallelWorker());

	/* We can't already be attached to a shared registry. */
	Assert(CurrentSession != NULL);
	Assert(CurrentSession->segment != NULL);
	Assert(CurrentSession->area != NULL);
	Assert(CurrentSession->shared_typmod_registry == NULL);
	Assert(CurrentSession->shared_record_table == NULL);
	Assert(CurrentSession->shared_typmod_table == NULL);

	/*
	 * We can't already have typmods in our local cache, because they'd clash
	 * with those imported by SharedRecordTypmodRegistryInit. This should be
	 * a freshly started parallel worker. If we ever support worker
	 * recycling, a worker would need to zap its local cache in between
	 * servicing different queries, in order to be able to call this and
	 * synchronize typmods with a new leader; but that's problematic because
	 * we can't be very sure that record-typmod-related state hasn't escaped
	 * to anywhere else in the process.
	 */
	Assert(NextRecordTypmod == 0);

	/* dshash handles must live for the whole backend, not a query. */
	old_context = MemoryContextSwitchTo(TopMemoryContext);

	/* Attach to the two hash tables. */
	record_table = dshash_attach(CurrentSession->area,
								 &srtr_record_table_params,
								 registry->record_table_handle,
								 CurrentSession->area);
	typmod_table = dshash_attach(CurrentSession->area,
								 &srtr_typmod_table_params,
								 registry->typmod_table_handle,
								 NULL);

	MemoryContextSwitchTo(old_context);

	/*
	 * Set up detach hook to run at worker exit. Currently this is the same
	 * as the leader's detach hook, but in future they might need to be
	 * different.
	 */
	on_dsm_detach(CurrentSession->segment,
				  shared_record_typmod_registry_detach,
				  PointerGetDatum(registry));

	/*
	 * Set up the session state that will tell assign_record_type_typmod and
	 * lookup_rowtype_tupdesc_internal about the shared registry.
	 */
	CurrentSession->shared_typmod_registry = registry;
	CurrentSession->shared_record_table = record_table;
	CurrentSession->shared_typmod_table = typmod_table;
}
2380 :
2381 : /*
2382 : * InvalidateCompositeTypeCacheEntry
2383 : * Invalidate particular TypeCacheEntry on Relcache inval callback
2384 : *
2385 : * Delete the cached tuple descriptor (if any) for the given composite
2386 : * type, and reset whatever info we have cached about the composite type's
2387 : * comparability.
2388 : */
static void
InvalidateCompositeTypeCacheEntry(TypeCacheEntry *typentry)
{
	bool		hadTupDescOrOpclass;

	Assert(typentry->typtype == TYPTYPE_COMPOSITE &&
		   OidIsValid(typentry->typrelid));

	/* Remember whether there's anything to clean, for the call below. */
	hadTupDescOrOpclass = (typentry->tupDesc != NULL) ||
		(typentry->flags & TCFLAGS_OPERATOR_FLAGS);

	/* Delete tupdesc if we have it */
	if (typentry->tupDesc != NULL)
	{
		/*
		 * Release our refcount and free the tupdesc if none remain. We can't
		 * use DecrTupleDescRefCount here because this reference is not logged
		 * by the current resource owner.
		 */
		Assert(typentry->tupDesc->tdrefcount > 0);
		if (--typentry->tupDesc->tdrefcount == 0)
			FreeTupleDesc(typentry->tupDesc);
		typentry->tupDesc = NULL;

		/*
		 * Also clear tupDesc_identifier, so that anyone watching it will
		 * realize that the tupdesc has changed.
		 */
		typentry->tupDesc_identifier = 0;
	}

	/* Reset equality/comparison/hashing validity information */
	typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;

	/*
	 * Call delete_rel_type_cache_if_needed() if we actually cleared
	 * something.
	 */
	if (hadTupDescOrOpclass)
		delete_rel_type_cache_if_needed(typentry);
}
2430 :
2431 : /*
2432 : * TypeCacheRelCallback
2433 : * Relcache inval callback function
2434 : *
2435 : * Delete the cached tuple descriptor (if any) for the given rel's composite
2436 : * type, or for all composite types if relid == InvalidOid. Also reset
2437 : * whatever info we have cached about the composite type's comparability.
2438 : *
2439 : * This is called when a relcache invalidation event occurs for the given
2440 : * relid. We can't use syscache to find a type corresponding to the given
2441 : * relation because the code can be called outside of transaction. Thus, we
2442 : * use the RelIdToTypeIdCacheHash map to locate appropriate typcache entry.
2443 : */
static void
TypeCacheRelCallback(Datum arg, Oid relid)
{
	TypeCacheEntry *typentry;

	/*
	 * RelIdToTypeIdCacheHash and TypeCacheHash should exist, otherwise this
	 * callback wouldn't be registered
	 */
	if (OidIsValid(relid))
	{
		RelIdToTypeIdCacheEntry *relentry;

		/*
		 * Find a RelIdToTypeIdCacheHash entry, which should exist as soon as
		 * corresponding typcache entry has something to clean.
		 */
		relentry = (RelIdToTypeIdCacheEntry *) hash_search(RelIdToTypeIdCacheHash,
														   &relid,
														   HASH_FIND, NULL);

		if (relentry != NULL)
		{
			typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
													  &relentry->composite_typid,
													  HASH_FIND, NULL);

			if (typentry != NULL)
			{
				Assert(typentry->typtype == TYPTYPE_COMPOSITE);
				Assert(relid == typentry->typrelid);

				InvalidateCompositeTypeCacheEntry(typentry);
			}
		}

		/*
		 * Visit all the domain types sequentially. Typically, this shouldn't
		 * affect performance since domain types are less tended to bloat.
		 * Domain types are created manually, unlike composite types which are
		 * automatically created for every temporary table.
		 */
		for (typentry = firstDomainTypeEntry;
			 typentry != NULL;
			 typentry = typentry->nextDomain)
		{
			/*
			 * If it's domain over composite, reset flags. (We don't bother
			 * trying to determine whether the specific base type needs a
			 * reset.) Note that if we haven't determined whether the base
			 * type is composite, we don't need to reset anything.
			 */
			if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
				typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
		}
	}
	else
	{
		HASH_SEQ_STATUS status;

		/*
		 * Relid is invalid. By convention, we need to reset all composite
		 * types in cache. Also, we should reset flags for domain types, and
		 * we loop over all entries in hash, so, do it in a single scan.
		 */
		hash_seq_init(&status, TypeCacheHash);
		while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
		{
			if (typentry->typtype == TYPTYPE_COMPOSITE)
			{
				InvalidateCompositeTypeCacheEntry(typentry);
			}
			else if (typentry->typtype == TYPTYPE_DOMAIN)
			{
				/*
				 * If it's domain over composite, reset flags. (We don't
				 * bother trying to determine whether the specific base type
				 * needs a reset.) Note that if we haven't determined whether
				 * the base type is composite, we don't need to reset
				 * anything.
				 */
				if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
					typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
			}
		}
	}
}
2531 :
2532 : /*
2533 : * TypeCacheTypCallback
2534 : * Syscache inval callback function
2535 : *
2536 : * This is called when a syscache invalidation event occurs for any
2537 : * pg_type row. If we have information cached about that type, mark
2538 : * it as needing to be reloaded.
2539 : */
static void
TypeCacheTypCallback(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
{
	HASH_SEQ_STATUS status;
	TypeCacheEntry *typentry;

	/* TypeCacheHash must exist, else this callback wouldn't be registered */

	/*
	 * By convention, zero hash value is passed to the callback as a sign that
	 * it's time to invalidate the whole cache. See sinval.c, inval.c and
	 * InvalidateSystemCachesExtended().
	 */
	if (hashvalue == 0)
		hash_seq_init(&status, TypeCacheHash);
	else
		hash_seq_init_with_hash_value(&status, TypeCacheHash, hashvalue);

	while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
	{
		/* Remember whether the flag was set, for the call below. */
		bool		hadPgTypeData = (typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA);

		Assert(hashvalue == 0 || typentry->type_id_hash == hashvalue);

		/*
		 * Mark the data obtained directly from pg_type as invalid. Also, if
		 * it's a domain, typnotnull might've changed, so we'll need to
		 * recalculate its constraints.
		 */
		typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
							 TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS);

		/*
		 * Call delete_rel_type_cache_if_needed() if we cleaned
		 * TCFLAGS_HAVE_PG_TYPE_DATA flag previously.
		 */
		if (hadPgTypeData)
			delete_rel_type_cache_if_needed(typentry);
	}
}
2580 :
2581 : /*
2582 : * TypeCacheOpcCallback
2583 : * Syscache inval callback function
2584 : *
2585 : * This is called when a syscache invalidation event occurs for any pg_opclass
2586 : * row. In principle we could probably just invalidate data dependent on the
2587 : * particular opclass, but since updates on pg_opclass are rare in production
2588 : * it doesn't seem worth a lot of complication: we just mark all cached data
2589 : * invalid.
2590 : *
2591 : * Note that we don't bother watching for updates on pg_amop or pg_amproc.
2592 : * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
2593 : * is not allowed to be used to add/drop the primary operators and functions
2594 : * of an opclass, only cross-type members of a family; and the latter sorts
2595 : * of members are not going to get cached here.
2596 : */
2597 : static void
2598 1702 : TypeCacheOpcCallback(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
2599 : {
2600 : HASH_SEQ_STATUS status;
2601 : TypeCacheEntry *typentry;
2602 :
2603 : /* TypeCacheHash must exist, else this callback wouldn't be registered */
2604 1702 : hash_seq_init(&status, TypeCacheHash);
2605 11585 : while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2606 : {
2607 8181 : bool hadOpclass = (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2608 :
2609 : /* Reset equality/comparison/hashing validity information */
2610 8181 : typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2611 :
2612 : /*
2613 : * Call delete_rel_type_cache_if_needed() if we actually cleared some
2614 : * of TCFLAGS_OPERATOR_FLAGS.
2615 : */
2616 8181 : if (hadOpclass)
2617 1621 : delete_rel_type_cache_if_needed(typentry);
2618 : }
2619 1702 : }
2620 :
2621 : /*
2622 : * TypeCacheConstrCallback
2623 : * Syscache inval callback function
2624 : *
2625 : * This is called when a syscache invalidation event occurs for any
2626 : * pg_constraint row. We flush information about domain constraints
2627 : * when this happens.
2628 : *
2629 : * It's slightly annoying that we can't tell whether the inval event was for
2630 : * a domain constraint record or not; there's usually more update traffic
2631 : * for table constraints than domain constraints, so we'll do a lot of
2632 : * useless flushes. Still, this is better than the old no-caching-at-all
2633 : * approach to domain constraints.
2634 : */
2635 : static void
2636 161921 : TypeCacheConstrCallback(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
2637 : {
2638 : TypeCacheEntry *typentry;
2639 :
2640 : /*
2641 : * Because this is called very frequently, and typically very few of the
2642 : * typcache entries are for domains, we don't use hash_seq_search here.
2643 : * Instead we thread all the domain-type entries together so that we can
2644 : * visit them cheaply.
2645 : */
2646 161921 : for (typentry = firstDomainTypeEntry;
2647 296711 : typentry != NULL;
2648 134790 : typentry = typentry->nextDomain)
2649 : {
2650 : /* Reset domain constraint validity information */
2651 134790 : typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
2652 : }
2653 161921 : }
2654 :
2655 :
2656 : /*
2657 : * Check if given OID is part of the subset that's sortable by comparisons
2658 : */
2659 : static inline bool
2660 151946 : enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
2661 : {
2662 : Oid offset;
2663 :
2664 151946 : if (arg < enumdata->bitmap_base)
2665 0 : return false;
2666 151946 : offset = arg - enumdata->bitmap_base;
2667 151946 : if (offset > (Oid) INT_MAX)
2668 0 : return false;
2669 151946 : return bms_is_member((int) offset, enumdata->sorted_values);
2670 : }
2671 :
2672 :
2673 : /*
2674 : * compare_values_of_enum
2675 : * Compare two members of an enum type.
2676 : * Return <0, 0, or >0 according as arg1 <, =, or > arg2.
2677 : *
2678 : * Note: currently, the enumData cache is refreshed only if we are asked
2679 : * to compare an enum value that is not already in the cache. This is okay
2680 : * because there is no support for re-ordering existing values, so comparisons
2681 : * of previously cached values will return the right answer even if other
2682 : * values have been added since we last loaded the cache.
2683 : *
2684 : * Note: the enum logic has a special-case rule about even-numbered versus
2685 : * odd-numbered OIDs, but we take no account of that rule here; this
2686 : * routine shouldn't even get called when that rule applies.
2687 : */
2688 : int
2689 76224 : compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
2690 : {
2691 : TypeCacheEnumData *enumdata;
2692 : EnumItem *item1;
2693 : EnumItem *item2;
2694 :
2695 : /*
2696 : * Equal OIDs are certainly equal --- this case was probably handled by
2697 : * our caller, but we may as well check.
2698 : */
2699 76224 : if (arg1 == arg2)
2700 0 : return 0;
2701 :
2702 : /* Load up the cache if first time through */
2703 76224 : if (tcache->enumData == NULL)
2704 6 : load_enum_cache_data(tcache);
2705 76224 : enumdata = tcache->enumData;
2706 :
2707 : /*
2708 : * If both OIDs are known-sorted, we can just compare them directly.
2709 : */
2710 151946 : if (enum_known_sorted(enumdata, arg1) &&
2711 75722 : enum_known_sorted(enumdata, arg2))
2712 : {
2713 0 : if (arg1 < arg2)
2714 0 : return -1;
2715 : else
2716 0 : return 1;
2717 : }
2718 :
2719 : /*
2720 : * Slow path: we have to identify their actual sort-order positions.
2721 : */
2722 76224 : item1 = find_enumitem(enumdata, arg1);
2723 76224 : item2 = find_enumitem(enumdata, arg2);
2724 :
2725 76224 : if (item1 == NULL || item2 == NULL)
2726 : {
2727 : /*
2728 : * We couldn't find one or both values. That means the enum has
2729 : * changed under us, so re-initialize the cache and try again. We
2730 : * don't bother retrying the known-sorted case in this path.
2731 : */
2732 0 : load_enum_cache_data(tcache);
2733 0 : enumdata = tcache->enumData;
2734 :
2735 0 : item1 = find_enumitem(enumdata, arg1);
2736 0 : item2 = find_enumitem(enumdata, arg2);
2737 :
2738 : /*
2739 : * If we still can't find the values, complain: we must have corrupt
2740 : * data.
2741 : */
2742 0 : if (item1 == NULL)
2743 0 : elog(ERROR, "enum value %u not found in cache for enum %s",
2744 : arg1, format_type_be(tcache->type_id));
2745 0 : if (item2 == NULL)
2746 0 : elog(ERROR, "enum value %u not found in cache for enum %s",
2747 : arg2, format_type_be(tcache->type_id));
2748 : }
2749 :
2750 76224 : if (item1->sort_order < item2->sort_order)
2751 25772 : return -1;
2752 50452 : else if (item1->sort_order > item2->sort_order)
2753 50452 : return 1;
2754 : else
2755 0 : return 0;
2756 : }
2757 :
2758 : /*
2759 : * Load (or re-load) the enumData member of the typcache entry.
2760 : */
2761 : static void
2762 6 : load_enum_cache_data(TypeCacheEntry *tcache)
2763 : {
2764 : TypeCacheEnumData *enumdata;
2765 : Relation enum_rel;
2766 : SysScanDesc enum_scan;
2767 : HeapTuple enum_tuple;
2768 : ScanKeyData skey;
2769 : EnumItem *items;
2770 : int numitems;
2771 : int maxitems;
2772 : Oid bitmap_base;
2773 : Bitmapset *bitmap;
2774 : MemoryContext oldcxt;
2775 : int bm_size,
2776 : start_pos;
2777 :
2778 : /* Check that this is actually an enum */
2779 6 : if (tcache->typtype != TYPTYPE_ENUM)
2780 0 : ereport(ERROR,
2781 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2782 : errmsg("%s is not an enum",
2783 : format_type_be(tcache->type_id))));
2784 :
2785 : /*
2786 : * Read all the information for members of the enum type. We collect the
2787 : * info in working memory in the caller's context, and then transfer it to
2788 : * permanent memory in CacheMemoryContext. This minimizes the risk of
2789 : * leaking memory from CacheMemoryContext in the event of an error partway
2790 : * through.
2791 : */
2792 6 : maxitems = 64;
2793 6 : items = palloc_array(EnumItem, maxitems);
2794 6 : numitems = 0;
2795 :
2796 : /* Scan pg_enum for the members of the target enum type. */
2797 6 : ScanKeyInit(&skey,
2798 : Anum_pg_enum_enumtypid,
2799 : BTEqualStrategyNumber, F_OIDEQ,
2800 : ObjectIdGetDatum(tcache->type_id));
2801 :
2802 6 : enum_rel = table_open(EnumRelationId, AccessShareLock);
2803 6 : enum_scan = systable_beginscan(enum_rel,
2804 : EnumTypIdLabelIndexId,
2805 : true, NULL,
2806 : 1, &skey);
2807 :
2808 49 : while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
2809 : {
2810 43 : Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);
2811 :
2812 43 : if (numitems >= maxitems)
2813 : {
2814 0 : maxitems *= 2;
2815 0 : items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
2816 : }
2817 43 : items[numitems].enum_oid = en->oid;
2818 43 : items[numitems].sort_order = en->enumsortorder;
2819 43 : numitems++;
2820 : }
2821 :
2822 6 : systable_endscan(enum_scan);
2823 6 : table_close(enum_rel, AccessShareLock);
2824 :
2825 : /* Sort the items into OID order */
2826 6 : qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
2827 :
2828 : /*
2829 : * Here, we create a bitmap listing a subset of the enum's OIDs that are
2830 : * known to be in order and can thus be compared with just OID comparison.
2831 : *
2832 : * The point of this is that the enum's initial OIDs were certainly in
2833 : * order, so there is some subset that can be compared via OID comparison;
2834 : * and we'd rather not do binary searches unnecessarily.
2835 : *
2836 : * This is somewhat heuristic, and might identify a subset of OIDs that
2837 : * isn't exactly what the type started with. That's okay as long as the
2838 : * subset is correctly sorted.
2839 : */
2840 6 : bitmap_base = InvalidOid;
2841 6 : bitmap = NULL;
2842 6 : bm_size = 1; /* only save sets of at least 2 OIDs */
2843 :
2844 14 : for (start_pos = 0; start_pos < numitems - 1; start_pos++)
2845 : {
2846 : /*
2847 : * Identify longest sorted subsequence starting at start_pos
2848 : */
2849 14 : Bitmapset *this_bitmap = bms_make_singleton(0);
2850 14 : int this_bm_size = 1;
2851 14 : Oid start_oid = items[start_pos].enum_oid;
2852 14 : float4 prev_order = items[start_pos].sort_order;
2853 : int i;
2854 :
2855 95 : for (i = start_pos + 1; i < numitems; i++)
2856 : {
2857 : Oid offset;
2858 :
2859 81 : offset = items[i].enum_oid - start_oid;
2860 : /* quit if bitmap would be too large; cutoff is arbitrary */
2861 81 : if (offset >= 8192)
2862 0 : break;
2863 : /* include the item if it's in-order */
2864 81 : if (items[i].sort_order > prev_order)
2865 : {
2866 43 : prev_order = items[i].sort_order;
2867 43 : this_bitmap = bms_add_member(this_bitmap, (int) offset);
2868 43 : this_bm_size++;
2869 : }
2870 : }
2871 :
2872 : /* Remember it if larger than previous best */
2873 14 : if (this_bm_size > bm_size)
2874 : {
2875 6 : bms_free(bitmap);
2876 6 : bitmap_base = start_oid;
2877 6 : bitmap = this_bitmap;
2878 6 : bm_size = this_bm_size;
2879 : }
2880 : else
2881 8 : bms_free(this_bitmap);
2882 :
2883 : /*
2884 : * Done if it's not possible to find a longer sequence in the rest of
2885 : * the list. In typical cases this will happen on the first
2886 : * iteration, which is why we create the bitmaps on the fly instead of
2887 : * doing a second pass over the list.
2888 : */
2889 14 : if (bm_size >= (numitems - start_pos - 1))
2890 6 : break;
2891 : }
2892 :
2893 : /* OK, copy the data into CacheMemoryContext */
2894 6 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2895 : enumdata = (TypeCacheEnumData *)
2896 6 : palloc(offsetof(TypeCacheEnumData, enum_values) +
2897 6 : numitems * sizeof(EnumItem));
2898 6 : enumdata->bitmap_base = bitmap_base;
2899 6 : enumdata->sorted_values = bms_copy(bitmap);
2900 6 : enumdata->num_values = numitems;
2901 6 : memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
2902 6 : MemoryContextSwitchTo(oldcxt);
2903 :
2904 6 : pfree(items);
2905 6 : bms_free(bitmap);
2906 :
2907 : /* And link the finished cache struct into the typcache */
2908 6 : if (tcache->enumData != NULL)
2909 0 : pfree(tcache->enumData);
2910 6 : tcache->enumData = enumdata;
2911 6 : }
2912 :
2913 : /*
2914 : * Locate the EnumItem with the given OID, if present
2915 : */
2916 : static EnumItem *
2917 152448 : find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
2918 : {
2919 : EnumItem srch;
2920 :
2921 : /* On some versions of Solaris, bsearch of zero items dumps core */
2922 152448 : if (enumdata->num_values <= 0)
2923 0 : return NULL;
2924 :
2925 152448 : srch.enum_oid = arg;
2926 152448 : return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2927 : sizeof(EnumItem), enum_oid_cmp);
2928 : }
2929 :
2930 : /*
2931 : * qsort comparison function for OID-ordered EnumItems
2932 : */
2933 : static int
2934 307268 : enum_oid_cmp(const void *left, const void *right)
2935 : {
2936 307268 : const EnumItem *l = (const EnumItem *) left;
2937 307268 : const EnumItem *r = (const EnumItem *) right;
2938 :
2939 307268 : return pg_cmp_u32(l->enum_oid, r->enum_oid);
2940 : }
2941 :
2942 : /*
2943 : * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
2944 : * to the given value and return a dsa_pointer.
2945 : */
2946 : static dsa_pointer
2947 236 : share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
2948 : {
2949 : dsa_pointer shared_dp;
2950 : TupleDesc shared;
2951 :
2952 236 : shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2953 236 : shared = (TupleDesc) dsa_get_address(area, shared_dp);
2954 236 : TupleDescCopy(shared, tupdesc);
2955 236 : shared->tdtypmod = typmod;
2956 :
2957 236 : return shared_dp;
2958 : }
2959 :
/*
 * If we are attached to a SharedRecordTypmodRegistry, use it to find or
 * create a shared TupleDesc that matches 'tupdesc'.  Otherwise return NULL.
 * Tuple descriptors returned by this function are not reference counted, and
 * will exist at least as long as the current backend remained attached to the
 * current session.
 */
static TupleDesc
find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
{
	TupleDesc	result;
	SharedRecordTableKey key;
	SharedRecordTableEntry *record_table_entry;
	SharedTypmodTableEntry *typmod_table_entry;
	dsa_pointer shared_dp;
	bool		found;
	uint32		typmod;

	/* If not even attached, nothing to do. */
	if (CurrentSession->shared_typmod_registry == NULL)
		return NULL;

	/* Try to find a matching tuple descriptor in the record table. */
	key.shared = false;
	key.u.local_tupdesc = tupdesc;
	record_table_entry = (SharedRecordTableEntry *)
		dshash_find(CurrentSession->shared_record_table, &key, false);
	if (record_table_entry)
	{
		/* Found a match; release the lock before handing back the address. */
		Assert(record_table_entry->key.shared);
		dshash_release_lock(CurrentSession->shared_record_table,
							record_table_entry);
		result = (TupleDesc)
			dsa_get_address(CurrentSession->area,
							record_table_entry->key.u.shared_tupdesc);
		Assert(result->tdrefcount == -1);

		return result;
	}

	/* Allocate a new typmod number.  This will be wasted if we error out. */
	typmod = (int)
		pg_atomic_fetch_add_u32(&CurrentSession->shared_typmod_registry->next_typmod,
								1);

	/* Copy the TupleDesc into shared memory. */
	shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);

	/*
	 * Create an entry in the typmod table so that others will understand this
	 * typmod number.
	 */
	PG_TRY();
	{
		typmod_table_entry = (SharedTypmodTableEntry *)
			dshash_find_or_insert(CurrentSession->shared_typmod_table,
								  &typmod, &found);
		/* typmod came from an atomic counter, so it must be unique */
		if (found)
			elog(ERROR, "cannot create duplicate shared record typmod");
	}
	PG_CATCH();
	{
		/* On error, give back the shared-memory copy before re-throwing. */
		dsa_free(CurrentSession->area, shared_dp);
		PG_RE_THROW();
	}
	PG_END_TRY();
	typmod_table_entry->typmod = typmod;
	typmod_table_entry->shared_tupdesc = shared_dp;
	dshash_release_lock(CurrentSession->shared_typmod_table,
						typmod_table_entry);

	/*
	 * Finally create an entry in the record table so others with matching
	 * tuple descriptors can reuse the typmod.
	 */
	record_table_entry = (SharedRecordTableEntry *)
		dshash_find_or_insert(CurrentSession->shared_record_table, &key,
							  &found);
	if (found)
	{
		/*
		 * Someone concurrently inserted a matching tuple descriptor since the
		 * first time we checked.  Use that one instead.
		 */
		dshash_release_lock(CurrentSession->shared_record_table,
							record_table_entry);

		/* Might as well free up the space used by the one we created. */
		found = dshash_delete_key(CurrentSession->shared_typmod_table,
								  &typmod);
		Assert(found);
		dsa_free(CurrentSession->area, shared_dp);

		/* Return the one we found. */
		Assert(record_table_entry->key.shared);
		result = (TupleDesc)
			dsa_get_address(CurrentSession->area,
							record_table_entry->key.u.shared_tupdesc);
		Assert(result->tdrefcount == -1);

		return result;
	}

	/* Store it and return it. */
	record_table_entry->key.shared = true;
	record_table_entry->key.u.shared_tupdesc = shared_dp;
	dshash_release_lock(CurrentSession->shared_record_table,
						record_table_entry);
	result = (TupleDesc)
		dsa_get_address(CurrentSession->area, shared_dp);
	Assert(result->tdrefcount == -1);

	return result;
}
3074 :
3075 : /*
3076 : * On-DSM-detach hook to forget about the current shared record typmod
3077 : * infrastructure. This is currently used by both leader and workers.
3078 : */
3079 : static void
3080 2116 : shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
3081 : {
3082 : /* Be cautious here: maybe we didn't finish initializing. */
3083 2116 : if (CurrentSession->shared_record_table != NULL)
3084 : {
3085 2116 : dshash_detach(CurrentSession->shared_record_table);
3086 2116 : CurrentSession->shared_record_table = NULL;
3087 : }
3088 2116 : if (CurrentSession->shared_typmod_table != NULL)
3089 : {
3090 2116 : dshash_detach(CurrentSession->shared_typmod_table);
3091 2116 : CurrentSession->shared_typmod_table = NULL;
3092 : }
3093 2116 : CurrentSession->shared_typmod_registry = NULL;
3094 2116 : }
3095 :
3096 : /*
3097 : * Insert RelIdToTypeIdCacheHash entry if needed.
3098 : */
3099 : static void
3100 530507 : insert_rel_type_cache_if_needed(TypeCacheEntry *typentry)
3101 : {
3102 : /* Immediately quit for non-composite types */
3103 530507 : if (typentry->typtype != TYPTYPE_COMPOSITE)
3104 468470 : return;
3105 :
3106 : /* typrelid should be given for composite types */
3107 : Assert(OidIsValid(typentry->typrelid));
3108 :
3109 : /*
3110 : * Insert a RelIdToTypeIdCacheHash entry if the typentry have any
3111 : * information indicating it should be here.
3112 : */
3113 62037 : if ((typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) ||
3114 0 : (typentry->flags & TCFLAGS_OPERATOR_FLAGS) ||
3115 0 : typentry->tupDesc != NULL)
3116 : {
3117 : RelIdToTypeIdCacheEntry *relentry;
3118 : bool found;
3119 :
3120 62037 : relentry = (RelIdToTypeIdCacheEntry *) hash_search(RelIdToTypeIdCacheHash,
3121 62037 : &typentry->typrelid,
3122 : HASH_ENTER, &found);
3123 62037 : relentry->relid = typentry->typrelid;
3124 62037 : relentry->composite_typid = typentry->type_id;
3125 : }
3126 : }
3127 :
/*
 * Delete entry RelIdToTypeIdCacheHash if needed after resetting of the
 * TCFLAGS_HAVE_PG_TYPE_DATA flag, or any of TCFLAGS_OPERATOR_FLAGS,
 * or tupDesc.
 */
static void
delete_rel_type_cache_if_needed(TypeCacheEntry *typentry)
{
#ifdef USE_ASSERT_CHECKING
	int			i;
	bool		is_in_progress = false;

	/*
	 * Check whether a lookup_type_cache() call for this type is still in
	 * progress; if so, the RelIdToTypeIdCacheHash entry may legitimately be
	 * missing, and the assertions below must allow for that.
	 */
	for (i = 0; i < in_progress_list_len; i++)
	{
		if (in_progress_list[i] == typentry->type_id)
		{
			is_in_progress = true;
			break;
		}
	}
#endif

	/* Immediately quit for non-composite types */
	if (typentry->typtype != TYPTYPE_COMPOSITE)
		return;

	/* typrelid should be given for composite types */
	Assert(OidIsValid(typentry->typrelid));

	/*
	 * Delete a RelIdToTypeIdCacheHash entry if the typentry doesn't have any
	 * information indicating entry should be still there.
	 */
	if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) &&
		!(typentry->flags & TCFLAGS_OPERATOR_FLAGS) &&
		typentry->tupDesc == NULL)
	{
		bool		found;

		(void) hash_search(RelIdToTypeIdCacheHash,
						   &typentry->typrelid,
						   HASH_REMOVE, &found);
		/* the entry may be absent only if the lookup was interrupted */
		Assert(found || is_in_progress);
	}
	else
	{
#ifdef USE_ASSERT_CHECKING
		/*
		 * In assert-enabled builds otherwise check for RelIdToTypeIdCacheHash
		 * entry if it should exist.
		 */
		bool		found;

		if (!is_in_progress)
		{
			(void) hash_search(RelIdToTypeIdCacheHash,
							   &typentry->typrelid,
							   HASH_FIND, &found);
			Assert(found);
		}
#endif
	}
}
3191 :
3192 : /*
3193 : * Add possibly missing RelIdToTypeId entries related to TypeCacheHash
3194 : * entries, marked as in-progress by lookup_type_cache(). It may happen
3195 : * in case of an error or interruption during the lookup_type_cache() call.
3196 : */
3197 : static void
3198 626370 : finalize_in_progress_typentries(void)
3199 : {
3200 : int i;
3201 :
3202 626371 : for (i = 0; i < in_progress_list_len; i++)
3203 : {
3204 : TypeCacheEntry *typentry;
3205 :
3206 1 : typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
3207 1 : &in_progress_list[i],
3208 : HASH_FIND, NULL);
3209 1 : if (typentry)
3210 1 : insert_rel_type_cache_if_needed(typentry);
3211 : }
3212 :
3213 626370 : in_progress_list_len = 0;
3214 626370 : }
3215 :
/*
 * AtEOXact_TypeCache
 *		End-of-transaction hook: finalize any typcache entries whose
 *		lookup_type_cache() call was left in progress.
 */
void
AtEOXact_TypeCache(void)
{
	finalize_in_progress_typentries();
}
3221 :
/*
 * AtEOSubXact_TypeCache
 *		End-of-subtransaction hook: same cleanup as AtEOXact_TypeCache,
 *		finalizing any in-progress typcache entries.
 */
void
AtEOSubXact_TypeCache(void)
{
	finalize_in_progress_typentries();
}
|