Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * typcache.c
4 : * POSTGRES type cache code
5 : *
6 : * The type cache exists to speed lookup of certain information about data
7 : * types that is not directly available from a type's pg_type row. For
8 : * example, we use a type's default btree opclass, or the default hash
9 : * opclass if no btree opclass exists, to determine which operators should
10 : * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
11 : *
12 : * Several seemingly-odd choices have been made to support use of the type
13 : * cache by generic array and record handling routines, such as array_eq(),
14 : * record_cmp(), and hash_array(). Because those routines are used as index
15 : * support operations, they cannot leak memory. To allow them to execute
16 : * efficiently, all information that they would like to re-use across calls
17 : * is kept in the type cache.
18 : *
19 : * Once created, a type cache entry lives as long as the backend does, so
20 : * there is no need for a call to release a cache entry. If the type is
21 : * dropped, the cache entry simply becomes wasted storage. This is not
22 : * expected to happen often, and assuming that typcache entries are good
23 : * permanently allows caching pointers to them in long-lived places.
24 : *
25 : * We have some provisions for updating cache entries if the stored data
26 : * becomes obsolete. Core data extracted from the pg_type row is updated
27 : * when we detect updates to pg_type. Information dependent on opclasses is
28 : * cleared if we detect updates to pg_opclass. We also support clearing the
29 : * tuple descriptor and operator/function parts of a rowtype's cache entry,
30 : * since those may need to change as a consequence of ALTER TABLE. Domain
31 : * constraint changes are also tracked properly.
32 : *
33 : *
34 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
35 : * Portions Copyright (c) 1994, Regents of the University of California
36 : *
37 : * IDENTIFICATION
38 : * src/backend/utils/cache/typcache.c
39 : *
40 : *-------------------------------------------------------------------------
41 : */
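     :
     : /*
     :  * For example, a fmgr-callable function such as array_eq() caches its
     :  * entry across calls roughly like this (condensed sketch):
     :  *
     :  *     typentry = (TypeCacheEntry *) fcinfo->flinfo->fn_extra;
     :  *     if (typentry == NULL || typentry->type_id != element_type)
     :  *     {
     :  *         typentry = lookup_type_cache(element_type,
     :  *                                      TYPECACHE_EQ_OPR_FINFO);
     :  *         fcinfo->flinfo->fn_extra = typentry;
     :  *     }
     :  *
     :  * relying on the entry remaining valid for the rest of the backend's
     :  * life.
     :  */
     :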
42 : #include "postgres.h"
43 :
44 : #include <limits.h>
45 :
46 : #include "access/hash.h"
47 : #include "access/htup_details.h"
48 : #include "access/nbtree.h"
49 : #include "access/parallel.h"
50 : #include "access/relation.h"
51 : #include "access/session.h"
52 : #include "access/table.h"
53 : #include "catalog/pg_am.h"
54 : #include "catalog/pg_constraint.h"
55 : #include "catalog/pg_enum.h"
56 : #include "catalog/pg_operator.h"
57 : #include "catalog/pg_range.h"
58 : #include "catalog/pg_type.h"
59 : #include "commands/defrem.h"
60 : #include "common/int.h"
61 : #include "executor/executor.h"
62 : #include "lib/dshash.h"
63 : #include "optimizer/optimizer.h"
64 : #include "port/pg_bitutils.h"
65 : #include "storage/lwlock.h"
66 : #include "utils/builtins.h"
67 : #include "utils/catcache.h"
68 : #include "utils/fmgroids.h"
69 : #include "utils/injection_point.h"
70 : #include "utils/inval.h"
71 : #include "utils/lsyscache.h"
72 : #include "utils/memutils.h"
73 : #include "utils/rel.h"
74 : #include "utils/syscache.h"
75 : #include "utils/typcache.h"
76 :
77 :
78 : /* The main type cache hashtable searched by lookup_type_cache */
79 : static HTAB *TypeCacheHash = NULL;
80 :
81 : /*
82 : * The mapping of a relation's OID to the corresponding composite type OID.
83 : * We keep a map entry as long as the corresponding typentry has something
84 : * to clear, i.e. it has either TCFLAGS_HAVE_PG_TYPE_DATA or
85 : * TCFLAGS_OPERATOR_FLAGS set, or a stored tupdesc.
86 : */
87 : static HTAB *RelIdToTypeIdCacheHash = NULL;
88 :
89 : typedef struct RelIdToTypeIdCacheEntry
90 : {
91 : Oid relid; /* OID of the relation */
92 : Oid composite_typid; /* OID of the relation's composite type */
93 : } RelIdToTypeIdCacheEntry;
94 :
95 : /* List of type cache entries for domain types */
96 : static TypeCacheEntry *firstDomainTypeEntry = NULL;
97 :
98 : /* Private flag bits in the TypeCacheEntry.flags field */
99 : #define TCFLAGS_HAVE_PG_TYPE_DATA 0x000001
100 : #define TCFLAGS_CHECKED_BTREE_OPCLASS 0x000002
101 : #define TCFLAGS_CHECKED_HASH_OPCLASS 0x000004
102 : #define TCFLAGS_CHECKED_EQ_OPR 0x000008
103 : #define TCFLAGS_CHECKED_LT_OPR 0x000010
104 : #define TCFLAGS_CHECKED_GT_OPR 0x000020
105 : #define TCFLAGS_CHECKED_CMP_PROC 0x000040
106 : #define TCFLAGS_CHECKED_HASH_PROC 0x000080
107 : #define TCFLAGS_CHECKED_HASH_EXTENDED_PROC 0x000100
108 : #define TCFLAGS_CHECKED_ELEM_PROPERTIES 0x000200
109 : #define TCFLAGS_HAVE_ELEM_EQUALITY 0x000400
110 : #define TCFLAGS_HAVE_ELEM_COMPARE 0x000800
111 : #define TCFLAGS_HAVE_ELEM_HASHING 0x001000
112 : #define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING 0x002000
113 : #define TCFLAGS_CHECKED_FIELD_PROPERTIES 0x004000
114 : #define TCFLAGS_HAVE_FIELD_EQUALITY 0x008000
115 : #define TCFLAGS_HAVE_FIELD_COMPARE 0x010000
116 : #define TCFLAGS_HAVE_FIELD_HASHING 0x020000
117 : #define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING 0x040000
118 : #define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS 0x080000
119 : #define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE 0x100000
120 :
121 : /* The flags associated with equality/comparison/hashing are all but these: */
122 : #define TCFLAGS_OPERATOR_FLAGS \
123 : (~(TCFLAGS_HAVE_PG_TYPE_DATA | \
124 : TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
125 : TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))
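     :
     : /*
     :  * This mask lets invalidation code drop every cached operator/function
     :  * fact in one step while preserving the pg_type data and domain flags;
     :  * the opclass invalidation callback does, in essence:
     :  *
     :  *     typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
     :  */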
126 :
127 : /*
128 : * Data stored about a domain type's constraints. Note that we do not create
129 : * this struct for the common case of a constraint-less domain; we just set
130 : * domainData to NULL to indicate that.
131 : *
132 : * Within a DomainConstraintCache, we store expression plan trees, but the
133 : * check_exprstate fields of the DomainConstraintState nodes are just NULL.
134 : * When needed, expression evaluation nodes are built by flat-copying the
135 : * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
136 : * Such a node tree is not part of the DomainConstraintCache, but is
137 : * considered to belong to a DomainConstraintRef.
138 : */
139 : struct DomainConstraintCache
140 : {
141 : List *constraints; /* list of DomainConstraintState nodes */
142 : MemoryContext dccContext; /* memory context holding all associated data */
143 : long dccRefCount; /* number of references to this struct */
144 : };
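     :
     : /*
     :  * A minimal usage sketch from a caller's perspective (variable names
     :  * are hypothetical; see InitDomainConstraintRef() and
     :  * UpdateDomainConstraintRef() below):
     :  *
     :  *     DomainConstraintRef ref;
     :  *     ListCell   *lc;
     :  *
     :  *     InitDomainConstraintRef(domtype, &ref, CurrentMemoryContext, true);
     :  *     ...
     :  *     UpdateDomainConstraintRef(&ref);    (cheap; expected before each use)
     :  *     foreach(lc, ref.constraints)
     :  *         ... evaluate the DomainConstraintState at lfirst(lc) ...
     :  */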
145 :
146 : /* Private information to support comparisons of enum values */
147 : typedef struct
148 : {
149 : Oid enum_oid; /* OID of one enum value */
150 : float4 sort_order; /* its sort position */
151 : } EnumItem;
152 :
153 : typedef struct TypeCacheEnumData
154 : {
155 : Oid bitmap_base; /* OID corresponding to bit 0 of bitmapset */
156 : Bitmapset *sorted_values; /* Set of OIDs known to be in order */
157 : int num_values; /* total number of values in enum */
158 : EnumItem enum_values[FLEXIBLE_ARRAY_MEMBER];
159 : } TypeCacheEnumData;
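     :
     : /*
     :  * For example (hypothetical OIDs): with bitmap_base = 17000, bit k set
     :  * in sorted_values means the enum member with OID 17000 + k is known to
     :  * sort in OID order, so two members that are both in the set can be
     :  * compared by OID alone.
     :  */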
160 :
161 : /*
162 : * We use a separate table for storing the definitions of non-anonymous
163 : * record types. Once defined, a record type will be remembered for the
164 : * life of the backend. Subsequent uses of the "same" record type (where
165 : * sameness means equalRowTypes) will refer to the existing table entry.
166 : *
167 : * Stored record types are remembered in a linear array of TupleDescs,
168 : * which can be indexed quickly with the assigned typmod. There is also
169 : * a hash table to speed searches for matching TupleDescs.
170 : */
171 :
172 : typedef struct RecordCacheEntry
173 : {
174 : TupleDesc tupdesc;
175 : } RecordCacheEntry;
176 :
177 : /*
178 : * To deal with non-anonymous record types that are exchanged by backends
179 : * involved in a parallel query, we also need a shared version of the above.
180 : */
181 : struct SharedRecordTypmodRegistry
182 : {
183 : /* A hash table for finding a matching TupleDesc. */
184 : dshash_table_handle record_table_handle;
185 : /* A hash table for finding a TupleDesc by typmod. */
186 : dshash_table_handle typmod_table_handle;
187 : /* A source of new record typmod numbers. */
188 : pg_atomic_uint32 next_typmod;
189 : };
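     :
     : /*
     :  * New typmods are handed out by atomically advancing next_typmod, along
     :  * the lines of (sketch):
     :  *
     :  *     typmod = (uint32)
     :  *         pg_atomic_fetch_add_u32(&registry->next_typmod, 1);
     :  *
     :  * so concurrent backends can never assign the same typmod twice.
     :  */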
190 :
191 : /*
192 : * When using shared tuple descriptors as hash table keys, we need a way to
193 : * search for an equal shared TupleDesc using a backend-local
194 : * TupleDesc. So we use this type which can hold either, and hash and compare
195 : * functions that know how to handle both.
196 : */
197 : typedef struct SharedRecordTableKey
198 : {
199 : union
200 : {
201 : TupleDesc local_tupdesc;
202 : dsa_pointer shared_tupdesc;
203 : } u;
204 : bool shared;
205 : } SharedRecordTableKey;
206 :
207 : /*
208 : * The shared version of RecordCacheEntry. This lets us look up a typmod
209 : * using a TupleDesc which may be in local or shared memory.
210 : */
211 : typedef struct SharedRecordTableEntry
212 : {
213 : SharedRecordTableKey key;
214 : } SharedRecordTableEntry;
215 :
216 : /*
217 : * An entry in SharedRecordTypmodRegistry's typmod table. This lets us look
218 : * up a TupleDesc in shared memory using a typmod.
219 : */
220 : typedef struct SharedTypmodTableEntry
221 : {
222 : uint32 typmod;
223 : dsa_pointer shared_tupdesc;
224 : } SharedTypmodTableEntry;
225 :
     : /*
     :  * OIDs of types whose cache entries are currently being filled by
     :  * lookup_type_cache(); invalidation callbacks consult this to catch
     :  * changes that arrive while an entry is still being built.
     :  */
226 : static Oid *in_progress_list;
227 : static int in_progress_list_len;
228 : static int in_progress_list_maxlen;
229 :
230 : /*
231 : * A comparator function for SharedRecordTableKey.
232 : */
233 : static int
234 120 : shared_record_table_compare(const void *a, const void *b, size_t size,
235 : void *arg)
236 : {
237 120 : dsa_area *area = (dsa_area *) arg;
238 120 : SharedRecordTableKey *k1 = (SharedRecordTableKey *) a;
239 120 : SharedRecordTableKey *k2 = (SharedRecordTableKey *) b;
240 : TupleDesc t1;
241 : TupleDesc t2;
242 :
243 120 : if (k1->shared)
244 0 : t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
245 : else
246 120 : t1 = k1->u.local_tupdesc;
247 :
248 120 : if (k2->shared)
249 120 : t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
250 : else
251 0 : t2 = k2->u.local_tupdesc;
252 :
253 120 : return equalRowTypes(t1, t2) ? 0 : 1;
254 : }
255 :
256 : /*
257 : * A hash function for SharedRecordTableKey.
258 : */
259 : static uint32
260 248 : shared_record_table_hash(const void *a, size_t size, void *arg)
261 : {
262 248 : dsa_area *area = (dsa_area *) arg;
263 248 : SharedRecordTableKey *k = (SharedRecordTableKey *) a;
264 : TupleDesc t;
265 :
266 248 : if (k->shared)
267 0 : t = (TupleDesc) dsa_get_address(area, k->u.shared_tupdesc);
268 : else
269 248 : t = k->u.local_tupdesc;
270 :
271 248 : return hashRowType(t);
272 : }
273 :
274 : /* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
275 : static const dshash_parameters srtr_record_table_params = {
276 : sizeof(SharedRecordTableKey), /* unused */
277 : sizeof(SharedRecordTableEntry),
278 : shared_record_table_compare,
279 : shared_record_table_hash,
280 : dshash_memcpy,
281 : LWTRANCHE_PER_SESSION_RECORD_TYPE
282 : };
283 :
284 : /* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
285 : static const dshash_parameters srtr_typmod_table_params = {
286 : sizeof(uint32),
287 : sizeof(SharedTypmodTableEntry),
288 : dshash_memcmp,
289 : dshash_memhash,
290 : dshash_memcpy,
291 : LWTRANCHE_PER_SESSION_RECORD_TYPMOD
292 : };
293 :
294 : /* hashtable for recognizing registered record types */
295 : static HTAB *RecordCacheHash = NULL;
296 :
297 : typedef struct RecordCacheArrayEntry
298 : {
299 : uint64 id;
300 : TupleDesc tupdesc;
301 : } RecordCacheArrayEntry;
302 :
303 : /* array of info about registered record types, indexed by assigned typmod */
304 : static RecordCacheArrayEntry *RecordCacheArray = NULL;
305 : static int32 RecordCacheArrayLen = 0; /* allocated length of above array */
306 : static int32 NextRecordTypmod = 0; /* number of entries used */
307 :
308 : /*
309 : * Process-wide counter for generating unique tupledesc identifiers.
310 : * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
311 : * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
312 : */
313 : static uint64 tupledesc_id_counter = INVALID_TUPLEDESC_IDENTIFIER;
314 :
315 : static void load_typcache_tupdesc(TypeCacheEntry *typentry);
316 : static void load_rangetype_info(TypeCacheEntry *typentry);
317 : static void load_multirangetype_info(TypeCacheEntry *typentry);
318 : static void load_domaintype_info(TypeCacheEntry *typentry);
319 : static int dcs_cmp(const void *a, const void *b);
320 : static void decr_dcc_refcount(DomainConstraintCache *dcc);
321 : static void dccref_deletion_callback(void *arg);
322 : static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
323 : static bool array_element_has_equality(TypeCacheEntry *typentry);
324 : static bool array_element_has_compare(TypeCacheEntry *typentry);
325 : static bool array_element_has_hashing(TypeCacheEntry *typentry);
326 : static bool array_element_has_extended_hashing(TypeCacheEntry *typentry);
327 : static void cache_array_element_properties(TypeCacheEntry *typentry);
328 : static bool record_fields_have_equality(TypeCacheEntry *typentry);
329 : static bool record_fields_have_compare(TypeCacheEntry *typentry);
330 : static bool record_fields_have_hashing(TypeCacheEntry *typentry);
331 : static bool record_fields_have_extended_hashing(TypeCacheEntry *typentry);
332 : static void cache_record_field_properties(TypeCacheEntry *typentry);
333 : static bool range_element_has_hashing(TypeCacheEntry *typentry);
334 : static bool range_element_has_extended_hashing(TypeCacheEntry *typentry);
335 : static void cache_range_element_properties(TypeCacheEntry *typentry);
336 : static bool multirange_element_has_hashing(TypeCacheEntry *typentry);
337 : static bool multirange_element_has_extended_hashing(TypeCacheEntry *typentry);
338 : static void cache_multirange_element_properties(TypeCacheEntry *typentry);
339 : static void TypeCacheRelCallback(Datum arg, Oid relid);
340 : static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue);
341 : static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
342 : static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
343 : static void load_enum_cache_data(TypeCacheEntry *tcache);
344 : static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
345 : static int enum_oid_cmp(const void *left, const void *right);
346 : static void shared_record_typmod_registry_detach(dsm_segment *segment,
347 : Datum datum);
348 : static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc);
349 : static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
350 : uint32 typmod);
351 : static void insert_rel_type_cache_if_needed(TypeCacheEntry *typentry);
352 : static void delete_rel_type_cache_if_needed(TypeCacheEntry *typentry);
353 :
354 :
355 : /*
356 : * Hash function compatible with the one-arg system cache hash function.
357 : */
358 : static uint32
359 830144 : type_cache_syshash(const void *key, Size keysize)
360 : {
361 : Assert(keysize == sizeof(Oid));
362 830144 : return GetSysCacheHashValue1(TYPEOID, ObjectIdGetDatum(*(const Oid *) key));
363 : }
364 :
365 : /*
366 : * lookup_type_cache
367 : *
368 : * Fetch the type cache entry for the specified datatype, and make sure that
369 : * all the fields requested by bits in 'flags' are valid.
370 : *
371 : * The result is never NULL --- we will ereport() if the passed type OID is
372 : * invalid. Note however that we may fail to find one or more of the
373 : * values requested by 'flags'; the caller needs to check whether the fields
374 : * are InvalidOid or not.
375 : *
376 : * Note that while filling TypeCacheEntry we might process concurrent
377 : * invalidation messages, causing our not-yet-filled TypeCacheEntry to be
378 : * invalidated. In this case, we typically only clear flags while values are
379 : * still available for the caller. It's expected that the caller holds
380 : * enough locks on type-dependent objects that the values are still relevant.
381 : * It's also important that the tupdesc is filled after all other
382 : * TypeCacheEntry items for TYPTYPE_COMPOSITE. So, tupdesc can't get
383 : * invalidated during the lookup_type_cache() call.
384 : */
385 : TypeCacheEntry *
386 744232 : lookup_type_cache(Oid type_id, int flags)
387 : {
388 : TypeCacheEntry *typentry;
389 : bool found;
390 : int in_progress_offset;
391 :
392 744232 : if (TypeCacheHash == NULL)
393 : {
394 : /* First time through: initialize the hash table */
395 : HASHCTL ctl;
396 : int allocsize;
397 :
398 10398 : ctl.keysize = sizeof(Oid);
399 10398 : ctl.entrysize = sizeof(TypeCacheEntry);
400 :
401 : /*
402 : * TypeCacheEntry takes its hash value from the system cache. For
403 : * TypeCacheHash we use the same hash in order to speed up searches by
404 : * hash value. This is used by hash_seq_init_with_hash_value().
405 : */
406 10398 : ctl.hash = type_cache_syshash;
407 :
408 10398 : TypeCacheHash = hash_create("Type information cache", 64,
409 : &ctl, HASH_ELEM | HASH_FUNCTION);
410 :
411 : Assert(RelIdToTypeIdCacheHash == NULL);
412 :
413 10398 : ctl.keysize = sizeof(Oid);
414 10398 : ctl.entrysize = sizeof(RelIdToTypeIdCacheEntry);
415 10398 : RelIdToTypeIdCacheHash = hash_create("Map from relid to OID of cached composite type", 64,
416 : &ctl, HASH_ELEM | HASH_BLOBS);
417 :
418 : /* Also set up callbacks for SI invalidations */
419 10398 : CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
420 10398 : CacheRegisterSyscacheCallback(TYPEOID, TypeCacheTypCallback, (Datum) 0);
421 10398 : CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
422 10398 : CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);
423 :
424 : /* Also make sure CacheMemoryContext exists */
425 10398 : if (!CacheMemoryContext)
426 0 : CreateCacheMemoryContext();
427 :
428 : /*
429 : * reserve enough in_progress_list slots that nested lookups rarely repalloc
430 : */
431 10398 : allocsize = 4;
432 10398 : in_progress_list =
433 10398 : MemoryContextAlloc(CacheMemoryContext,
434 : allocsize * sizeof(*in_progress_list));
435 10398 : in_progress_list_maxlen = allocsize;
436 : }
437 :
438 : Assert(TypeCacheHash != NULL && RelIdToTypeIdCacheHash != NULL);
439 :
440 : /* Register to catch invalidation messages */
441 744232 : if (in_progress_list_len >= in_progress_list_maxlen)
442 : {
443 : int allocsize;
444 :
445 0 : allocsize = in_progress_list_maxlen * 2;
446 0 : in_progress_list = repalloc(in_progress_list,
447 : allocsize * sizeof(*in_progress_list));
448 0 : in_progress_list_maxlen = allocsize;
449 : }
450 744232 : in_progress_offset = in_progress_list_len++;
451 744232 : in_progress_list[in_progress_offset] = type_id;
452 :
453 : /* Try to look up an existing entry */
454 744232 : typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
455 : &type_id,
456 : HASH_FIND, NULL);
457 744232 : if (typentry == NULL)
458 : {
459 : /*
460 : * If we didn't find one, we want to make one. But first look up the
461 : * pg_type row, just to make sure we don't make a cache entry for an
462 : * invalid type OID. If the type OID is not valid, present a
463 : * user-facing error, since some code paths such as domain_in() allow
464 : * this function to be reached with a user-supplied OID.
465 : */
466 : HeapTuple tp;
467 : Form_pg_type typtup;
468 :
469 37978 : tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
470 37978 : if (!HeapTupleIsValid(tp))
471 0 : ereport(ERROR,
472 : (errcode(ERRCODE_UNDEFINED_OBJECT),
473 : errmsg("type with OID %u does not exist", type_id)));
474 37978 : typtup = (Form_pg_type) GETSTRUCT(tp);
475 37978 : if (!typtup->typisdefined)
476 0 : ereport(ERROR,
477 : (errcode(ERRCODE_UNDEFINED_OBJECT),
478 : errmsg("type \"%s\" is only a shell",
479 : NameStr(typtup->typname))));
480 :
481 : /* Now make the typcache entry */
482 37978 : typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
483 : &type_id,
484 : HASH_ENTER, &found);
485 : Assert(!found); /* it wasn't there a moment ago */
486 :
487 2392614 : MemSet(typentry, 0, sizeof(TypeCacheEntry));
488 :
489 : /* These fields can never change, by definition */
490 37978 : typentry->type_id = type_id;
491 37978 : typentry->type_id_hash = get_hash_value(TypeCacheHash, &type_id);
492 :
493 : /* Keep this part in sync with the code below */
494 37978 : typentry->typlen = typtup->typlen;
495 37978 : typentry->typbyval = typtup->typbyval;
496 37978 : typentry->typalign = typtup->typalign;
497 37978 : typentry->typstorage = typtup->typstorage;
498 37978 : typentry->typtype = typtup->typtype;
499 37978 : typentry->typrelid = typtup->typrelid;
500 37978 : typentry->typsubscript = typtup->typsubscript;
501 37978 : typentry->typelem = typtup->typelem;
502 37978 : typentry->typarray = typtup->typarray;
503 37978 : typentry->typcollation = typtup->typcollation;
504 37978 : typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
505 :
506 : /* If it's a domain, immediately thread it into the domain cache list */
507 37978 : if (typentry->typtype == TYPTYPE_DOMAIN)
508 : {
509 1620 : typentry->nextDomain = firstDomainTypeEntry;
510 1620 : firstDomainTypeEntry = typentry;
511 : }
512 :
513 37978 : ReleaseSysCache(tp);
514 : }
515 706254 : else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
516 : {
517 : /*
518 : * We have an entry, but its pg_type row got changed, so reload the
519 : * data obtained directly from pg_type.
520 : */
521 : HeapTuple tp;
522 : Form_pg_type typtup;
523 :
524 506 : tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
525 506 : if (!HeapTupleIsValid(tp))
526 0 : ereport(ERROR,
527 : (errcode(ERRCODE_UNDEFINED_OBJECT),
528 : errmsg("type with OID %u does not exist", type_id)));
529 506 : typtup = (Form_pg_type) GETSTRUCT(tp);
530 506 : if (!typtup->typisdefined)
531 0 : ereport(ERROR,
532 : (errcode(ERRCODE_UNDEFINED_OBJECT),
533 : errmsg("type \"%s\" is only a shell",
534 : NameStr(typtup->typname))));
535 :
536 : /*
537 : * Keep this part in sync with the code above. Many of these fields
538 : * shouldn't ever change, particularly typtype, but copy 'em anyway.
539 : */
540 506 : typentry->typlen = typtup->typlen;
541 506 : typentry->typbyval = typtup->typbyval;
542 506 : typentry->typalign = typtup->typalign;
543 506 : typentry->typstorage = typtup->typstorage;
544 506 : typentry->typtype = typtup->typtype;
545 506 : typentry->typrelid = typtup->typrelid;
546 506 : typentry->typsubscript = typtup->typsubscript;
547 506 : typentry->typelem = typtup->typelem;
548 506 : typentry->typarray = typtup->typarray;
549 506 : typentry->typcollation = typtup->typcollation;
550 506 : typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
551 :
552 506 : ReleaseSysCache(tp);
553 : }
554 :
555 : /*
556 : * Look up opclasses if we haven't already and any dependent info is
557 : * requested.
558 : */
559 744232 : if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR | TYPECACHE_GT_OPR |
560 : TYPECACHE_CMP_PROC |
561 : TYPECACHE_EQ_OPR_FINFO | TYPECACHE_CMP_PROC_FINFO |
562 489560 : TYPECACHE_BTREE_OPFAMILY)) &&
563 489560 : !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
564 : {
565 : Oid opclass;
566 :
567 32768 : opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
568 32768 : if (OidIsValid(opclass))
569 : {
570 31684 : typentry->btree_opf = get_opclass_family(opclass);
571 31684 : typentry->btree_opintype = get_opclass_input_type(opclass);
572 : }
573 : else
574 : {
575 1084 : typentry->btree_opf = typentry->btree_opintype = InvalidOid;
576 : }
577 :
578 : /*
579 : * Reset information derived from btree opclass. Note in particular
580 : * that we'll redetermine the eq_opr even if we previously found one;
581 : * this matters in case a btree opclass has been added to a type that
582 : * previously had only a hash opclass.
583 : */
584 32768 : typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
585 : TCFLAGS_CHECKED_LT_OPR |
586 : TCFLAGS_CHECKED_GT_OPR |
587 : TCFLAGS_CHECKED_CMP_PROC);
588 32768 : typentry->flags |= TCFLAGS_CHECKED_BTREE_OPCLASS;
589 : }
590 :
591 : /*
592 : * If we need to look up equality operator, and there's no btree opclass,
593 : * force lookup of hash opclass.
594 : */
595 744232 : if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
596 463538 : !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
597 32518 : typentry->btree_opf == InvalidOid)
598 1072 : flags |= TYPECACHE_HASH_OPFAMILY;
599 :
600 744232 : if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO |
601 : TYPECACHE_HASH_EXTENDED_PROC |
602 : TYPECACHE_HASH_EXTENDED_PROC_FINFO |
603 316806 : TYPECACHE_HASH_OPFAMILY)) &&
604 316806 : !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
605 : {
606 : Oid opclass;
607 :
608 22468 : opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
609 22468 : if (OidIsValid(opclass))
610 : {
611 22208 : typentry->hash_opf = get_opclass_family(opclass);
612 22208 : typentry->hash_opintype = get_opclass_input_type(opclass);
613 : }
614 : else
615 : {
616 260 : typentry->hash_opf = typentry->hash_opintype = InvalidOid;
617 : }
618 :
619 : /*
620 : * Reset information derived from hash opclass. We do *not* reset the
621 : * eq_opr; if we already found one from the btree opclass, that
622 : * decision is still good.
623 : */
624 22468 : typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
625 : TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
626 22468 : typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
627 : }
628 :
629 : /*
630 : * Look for requested operators and functions, if we haven't already.
631 : */
632 744232 : if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
633 463538 : !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
634 : {
635 32518 : Oid eq_opr = InvalidOid;
636 :
637 32518 : if (typentry->btree_opf != InvalidOid)
638 31446 : eq_opr = get_opfamily_member(typentry->btree_opf,
639 : typentry->btree_opintype,
640 : typentry->btree_opintype,
641 : BTEqualStrategyNumber);
642 32518 : if (eq_opr == InvalidOid &&
643 1072 : typentry->hash_opf != InvalidOid)
644 872 : eq_opr = get_opfamily_member(typentry->hash_opf,
645 : typentry->hash_opintype,
646 : typentry->hash_opintype,
647 : HTEqualStrategyNumber);
648 :
649 : /*
650 : * If the proposed equality operator is array_eq or record_eq, check
651 : * to see if the element type or column types support equality. If
652 : * not, array_eq or record_eq would fail at runtime, so we don't want
653 : * to report that the type has equality. (We can omit similar
654 : * checking for ranges and multiranges because ranges can't be created
655 : * in the first place unless their subtypes support equality.)
656 : */
657 32518 : if (eq_opr == ARRAY_EQ_OP &&
658 2978 : !array_element_has_equality(typentry))
659 368 : eq_opr = InvalidOid;
660 32150 : else if (eq_opr == RECORD_EQ_OP &&
661 436 : !record_fields_have_equality(typentry))
662 192 : eq_opr = InvalidOid;
663 :
664 : /* Force update of eq_opr_finfo only if we're changing state */
665 32518 : if (typentry->eq_opr != eq_opr)
666 30946 : typentry->eq_opr_finfo.fn_oid = InvalidOid;
667 :
668 32518 : typentry->eq_opr = eq_opr;
669 :
670 : /*
671 : * Reset info about hash functions whenever we pick up new info about
672 : * equality operator. This is so we can ensure that the hash
673 : * functions match the operator.
674 : */
675 32518 : typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
676 : TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
677 32518 : typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
678 : }
679 744232 : if ((flags & TYPECACHE_LT_OPR) &&
680 289430 : !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
681 : {
682 18678 : Oid lt_opr = InvalidOid;
683 :
684 18678 : if (typentry->btree_opf != InvalidOid)
685 18264 : lt_opr = get_opfamily_member(typentry->btree_opf,
686 : typentry->btree_opintype,
687 : typentry->btree_opintype,
688 : BTLessStrategyNumber);
689 :
690 : /*
691 : * As above, make sure array_cmp or record_cmp will succeed; but again
692 : * we need no special check for ranges or multiranges.
693 : */
694 18678 : if (lt_opr == ARRAY_LT_OP &&
695 2270 : !array_element_has_compare(typentry))
696 544 : lt_opr = InvalidOid;
697 18134 : else if (lt_opr == RECORD_LT_OP &&
698 132 : !record_fields_have_compare(typentry))
699 12 : lt_opr = InvalidOid;
700 :
701 18678 : typentry->lt_opr = lt_opr;
702 18678 : typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
703 : }
704 744232 : if ((flags & TYPECACHE_GT_OPR) &&
705 281336 : !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
706 : {
707 18434 : Oid gt_opr = InvalidOid;
708 :
709 18434 : if (typentry->btree_opf != InvalidOid)
710 18048 : gt_opr = get_opfamily_member(typentry->btree_opf,
711 : typentry->btree_opintype,
712 : typentry->btree_opintype,
713 : BTGreaterStrategyNumber);
714 :
715 : /*
716 : * As above, make sure array_cmp or record_cmp will succeed; but again
717 : * we need no special check for ranges or multiranges.
718 : */
719 18434 : if (gt_opr == ARRAY_GT_OP &&
720 2254 : !array_element_has_compare(typentry))
721 544 : gt_opr = InvalidOid;
722 17890 : else if (gt_opr == RECORD_GT_OP &&
723 132 : !record_fields_have_compare(typentry))
724 12 : gt_opr = InvalidOid;
725 :
726 18434 : typentry->gt_opr = gt_opr;
727 18434 : typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
728 : }
729 744232 : if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
730 26420 : !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
731 : {
732 4410 : Oid cmp_proc = InvalidOid;
733 :
734 4410 : if (typentry->btree_opf != InvalidOid)
735 4202 : cmp_proc = get_opfamily_proc(typentry->btree_opf,
736 : typentry->btree_opintype,
737 : typentry->btree_opintype,
738 : BTORDER_PROC);
739 :
740 : /*
741 : * As above, make sure array_cmp or record_cmp will succeed; but again
742 : * we need no special check for ranges or multiranges.
743 : */
744 4410 : if (cmp_proc == F_BTARRAYCMP &&
745 910 : !array_element_has_compare(typentry))
746 174 : cmp_proc = InvalidOid;
747 4236 : else if (cmp_proc == F_BTRECORDCMP &&
748 248 : !record_fields_have_compare(typentry))
749 174 : cmp_proc = InvalidOid;
750 :
751 : /* Force update of cmp_proc_finfo only if we're changing state */
752 4410 : if (typentry->cmp_proc != cmp_proc)
753 3810 : typentry->cmp_proc_finfo.fn_oid = InvalidOid;
754 :
755 4410 : typentry->cmp_proc = cmp_proc;
756 4410 : typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
757 : }
758 744232 : if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) &&
759 315926 : !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
760 : {
761 22202 : Oid hash_proc = InvalidOid;
762 :
763 : /*
764 : * We insist that the eq_opr, if one has been determined, match the
765 : * hash opclass; else report there is no hash function.
766 : */
767 22202 : if (typentry->hash_opf != InvalidOid &&
768 43078 : (!OidIsValid(typentry->eq_opr) ||
769 21044 : typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
770 : typentry->hash_opintype,
771 : typentry->hash_opintype,
772 : HTEqualStrategyNumber)))
773 22034 : hash_proc = get_opfamily_proc(typentry->hash_opf,
774 : typentry->hash_opintype,
775 : typentry->hash_opintype,
776 : HASHSTANDARD_PROC);
777 :
778 : /*
779 : * As above, make sure hash_array, hash_record, or hash_range will
780 : * succeed.
781 : */
782 22202 : if (hash_proc == F_HASH_ARRAY &&
783 2092 : !array_element_has_hashing(typentry))
784 192 : hash_proc = InvalidOid;
785 22010 : else if (hash_proc == F_HASH_RECORD &&
786 420 : !record_fields_have_hashing(typentry))
787 230 : hash_proc = InvalidOid;
788 21780 : else if (hash_proc == F_HASH_RANGE &&
789 120 : !range_element_has_hashing(typentry))
790 6 : hash_proc = InvalidOid;
791 :
792 : /*
793 : * Likewise for hash_multirange.
794 : */
795 22202 : if (hash_proc == F_HASH_MULTIRANGE &&
796 18 : !multirange_element_has_hashing(typentry))
797 6 : hash_proc = InvalidOid;
798 :
799 : /* Force update of hash_proc_finfo only if we're changing state */
800 22202 : if (typentry->hash_proc != hash_proc)
801 20390 : typentry->hash_proc_finfo.fn_oid = InvalidOid;
802 :
803 22202 : typentry->hash_proc = hash_proc;
804 22202 : typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
805 : }
806 744232 : if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
807 8384 : TYPECACHE_HASH_EXTENDED_PROC_FINFO)) &&
808 8384 : !(typentry->flags & TCFLAGS_CHECKED_HASH_EXTENDED_PROC))
809 : {
810 3672 : Oid hash_extended_proc = InvalidOid;
811 :
812 : /*
813 : * We insist that the eq_opr, if one has been determined, match the
814 : * hash opclass; else report there is no hash function.
815 : */
816 3672 : if (typentry->hash_opf != InvalidOid &&
817 6798 : (!OidIsValid(typentry->eq_opr) ||
818 3162 : typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
819 : typentry->hash_opintype,
820 : typentry->hash_opintype,
821 : HTEqualStrategyNumber)))
822 3636 : hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
823 : typentry->hash_opintype,
824 : typentry->hash_opintype,
825 : HASHEXTENDED_PROC);
826 :
827 : /*
828 : * As above, make sure hash_array_extended, hash_record_extended, or
829 : * hash_range_extended will succeed.
830 : */
831 3672 : if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
832 360 : !array_element_has_extended_hashing(typentry))
833 174 : hash_extended_proc = InvalidOid;
834 3498 : else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
835 188 : !record_fields_have_extended_hashing(typentry))
836 180 : hash_extended_proc = InvalidOid;
837 3318 : else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
838 0 : !range_element_has_extended_hashing(typentry))
839 0 : hash_extended_proc = InvalidOid;
840 :
841 : /*
842 : * Likewise for hash_multirange_extended.
843 : */
844 3672 : if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
845 0 : !multirange_element_has_extended_hashing(typentry))
846 0 : hash_extended_proc = InvalidOid;
847 :
848 : /* Force update of proc finfo only if we're changing state */
849 3672 : if (typentry->hash_extended_proc != hash_extended_proc)
850 3270 : typentry->hash_extended_proc_finfo.fn_oid = InvalidOid;
851 :
852 3672 : typentry->hash_extended_proc = hash_extended_proc;
853 3672 : typentry->flags |= TCFLAGS_CHECKED_HASH_EXTENDED_PROC;
854 : }
855 :
856 : /*
857 : * Set up fmgr lookup info as requested
858 : *
859 : * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
860 : * which is not quite right (they're really in the hash table's private
861 : * memory context) but this will do for our purposes.
862 : *
863 : * Note: the code above avoids invalidating the finfo structs unless the
864 : * referenced operator/function OID actually changes. This is to prevent
865 : * unnecessary leakage of any subsidiary data attached to an finfo, since
866 : * that would cause session-lifespan memory leaks.
867 : */
868 744232 : if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
869 5924 : typentry->eq_opr_finfo.fn_oid == InvalidOid &&
870 1796 : typentry->eq_opr != InvalidOid)
871 : {
872 : Oid eq_opr_func;
873 :
874 1790 : eq_opr_func = get_opcode(typentry->eq_opr);
875 1790 : if (eq_opr_func != InvalidOid)
876 1790 : fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
877 : CacheMemoryContext);
878 : }
879 744232 : if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
880 15112 : typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
881 4154 : typentry->cmp_proc != InvalidOid)
882 : {
883 1586 : fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
884 : CacheMemoryContext);
885 : }
886 744232 : if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
887 8104 : typentry->hash_proc_finfo.fn_oid == InvalidOid &&
888 1632 : typentry->hash_proc != InvalidOid)
889 : {
890 1434 : fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
891 : CacheMemoryContext);
892 : }
893 744232 : if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) &&
894 114 : typentry->hash_extended_proc_finfo.fn_oid == InvalidOid &&
895 36 : typentry->hash_extended_proc != InvalidOid)
896 : {
897 24 : fmgr_info_cxt(typentry->hash_extended_proc,
898 : &typentry->hash_extended_proc_finfo,
899 : CacheMemoryContext);
900 : }
901 :
902 : /*
903 : * If it's a composite type (row type), get tupdesc if requested
904 : */
905 744232 : if ((flags & TYPECACHE_TUPDESC) &&
906 83742 : typentry->tupDesc == NULL &&
907 3838 : typentry->typtype == TYPTYPE_COMPOSITE)
908 : {
909 3712 : load_typcache_tupdesc(typentry);
910 : }
911 :
912 : /*
913 : * If requested, get information about a range type
914 : *
915 : * This includes making sure that the basic info about the range element
916 : * type is up-to-date.
917 : */
918 744232 : if ((flags & TYPECACHE_RANGE_INFO) &&
919 29148 : typentry->typtype == TYPTYPE_RANGE)
920 : {
921 29148 : if (typentry->rngelemtype == NULL)
922 834 : load_rangetype_info(typentry);
923 28314 : else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
924 2 : (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
925 : }
926 :
927 : /*
928 : * If requested, get information about a multirange type
929 : */
930 744232 : if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
931 11908 : typentry->rngtype == NULL &&
932 254 : typentry->typtype == TYPTYPE_MULTIRANGE)
933 : {
934 254 : load_multirangetype_info(typentry);
935 : }
936 :
937 : /*
938 : * If requested, get information about a domain type
939 : */
940 744232 : if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
941 8478 : typentry->domainBaseType == InvalidOid &&
942 5562 : typentry->typtype == TYPTYPE_DOMAIN)
943 : {
944 498 : typentry->domainBaseTypmod = -1;
945 498 : typentry->domainBaseType =
946 498 : getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
947 : }
948 744232 : if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
949 42392 : (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
950 4928 : typentry->typtype == TYPTYPE_DOMAIN)
951 : {
952 2588 : load_domaintype_info(typentry);
953 : }
954 :
955 744232 : INJECTION_POINT("typecache-before-rel-type-cache-insert");
956 :
957 : Assert(in_progress_offset + 1 == in_progress_list_len);
958 744230 : in_progress_list_len--;
959 :
960 744230 : insert_rel_type_cache_if_needed(typentry);
961 :
962 744230 : return typentry;
963 : }
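     :
     : /*
     :  * Usage sketch (hypothetical caller): request only the fields you need
     :  * and then check them, since a requested field may legitimately come
     :  * back unset:
     :  *
     :  *     TypeCacheEntry *typentry = lookup_type_cache(type_id,
     :  *                                                  TYPECACHE_LT_OPR);
     :  *
     :  *     if (!OidIsValid(typentry->lt_opr))
     :  *         ereport(ERROR, ...);
     :  *
     :  * where the error path covers types that lack a default btree ordering.
     :  */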
964 :
965 : /*
966 : * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
967 : */
968 : static void
969 3950 : load_typcache_tupdesc(TypeCacheEntry *typentry)
970 : {
971 : Relation rel;
972 :
973 3950 : if (!OidIsValid(typentry->typrelid)) /* should not happen */
974 0 : elog(ERROR, "invalid typrelid for composite type %u",
975 : typentry->type_id);
976 3950 : rel = relation_open(typentry->typrelid, AccessShareLock);
977 : Assert(rel->rd_rel->reltype == typentry->type_id);
978 :
979 : /*
980 : * Link to the tupdesc and increment its refcount (we assert it's a
981 : * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
982 : * because the reference mustn't be entered in the current resource owner;
983 : * it can outlive the current query.
984 : */
985 3950 : typentry->tupDesc = RelationGetDescr(rel);
986 :
987 : Assert(typentry->tupDesc->tdrefcount > 0);
988 3950 : typentry->tupDesc->tdrefcount++;
989 :
990 : /*
991 : * In future, we could take some pains to not change tupDesc_identifier if
992 : * the tupdesc didn't really change; but for now it's not worth it.
993 : */
994 3950 : typentry->tupDesc_identifier = ++tupledesc_id_counter;
995 :
996 3950 : relation_close(rel, AccessShareLock);
997 3950 : }
998 :
999 : /*
1000 : * load_rangetype_info --- helper routine to set up range type information
1001 : */
1002 : static void
1003 922 : load_rangetype_info(TypeCacheEntry *typentry)
1004 : {
1005 : Form_pg_range pg_range;
1006 : HeapTuple tup;
1007 : Oid subtypeOid;
1008 : Oid opclassOid;
1009 : Oid canonicalOid;
1010 : Oid subdiffOid;
1011 : Oid opfamilyOid;
1012 : Oid opcintype;
1013 : Oid cmpFnOid;
1014 :
1015 : /* get information from pg_range */
1016 922 : tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id));
1017 : /* should not fail, since we already checked typtype ... */
1018 922 : if (!HeapTupleIsValid(tup))
1019 0 : elog(ERROR, "cache lookup failed for range type %u",
1020 : typentry->type_id);
1021 922 : pg_range = (Form_pg_range) GETSTRUCT(tup);
1022 :
1023 922 : subtypeOid = pg_range->rngsubtype;
1024 922 : typentry->rng_collation = pg_range->rngcollation;
1025 922 : opclassOid = pg_range->rngsubopc;
1026 922 : canonicalOid = pg_range->rngcanonical;
1027 922 : subdiffOid = pg_range->rngsubdiff;
1028 :
1029 922 : ReleaseSysCache(tup);
1030 :
1031 : /* get opclass properties and look up the comparison function */
1032 922 : opfamilyOid = get_opclass_family(opclassOid);
1033 922 : opcintype = get_opclass_input_type(opclassOid);
1034 922 : typentry->rng_opfamily = opfamilyOid;
1035 :
1036 922 : cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
1037 : BTORDER_PROC);
1038 922 : if (!RegProcedureIsValid(cmpFnOid))
1039 0 : elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
1040 : BTORDER_PROC, opcintype, opcintype, opfamilyOid);
1041 :
1042 : /* set up cached fmgrinfo structs */
1043 922 : fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
1044 : CacheMemoryContext);
1045 922 : if (OidIsValid(canonicalOid))
1046 606 : fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
1047 : CacheMemoryContext);
1048 922 : if (OidIsValid(subdiffOid))
1049 772 : fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
1050 : CacheMemoryContext);
1051 :
1052 : /* Lastly, set up link to the element type --- this marks data valid */
1053 922 : typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
1054 922 : }
1055 :
1056 : /*
1057 : * load_multirangetype_info --- helper routine to set up multirange type
1058 : * information
1059 : */
1060 : static void
1061 254 : load_multirangetype_info(TypeCacheEntry *typentry)
1062 : {
1063 : Oid rangetypeOid;
1064 :
1065 254 : rangetypeOid = get_multirange_range(typentry->type_id);
1066 254 : if (!OidIsValid(rangetypeOid))
1067 0 : elog(ERROR, "cache lookup failed for multirange type %u",
1068 : typentry->type_id);
1069 :
1070 254 : typentry->rngtype = lookup_type_cache(rangetypeOid, TYPECACHE_RANGE_INFO);
1071 254 : }
1072 :
1073 : /*
1074 : * load_domaintype_info --- helper routine to set up domain constraint info
1075 : *
1076 : * Note: we assume we're called in a relatively short-lived context, so it's
1077 : * okay to leak data into the current context while scanning pg_constraint.
1078 : * We build the new DomainConstraintCache data in a context underneath
1079 : * CurrentMemoryContext, and reparent it under CacheMemoryContext when
1080 : * complete.
1081 : */
1082 : static void
1083 2588 : load_domaintype_info(TypeCacheEntry *typentry)
1084 : {
1085 2588 : Oid typeOid = typentry->type_id;
1086 : DomainConstraintCache *dcc;
1087 2588 : bool notNull = false;
1088 : DomainConstraintState **ccons;
1089 : int cconslen;
1090 : Relation conRel;
1091 : MemoryContext oldcxt;
1092 :
1093 : /*
1094 : * If we're here, any existing constraint info is stale, so release it.
1095 : * For safety, be sure to null the link before trying to delete the data.
1096 : */
1097 2588 : if (typentry->domainData)
1098 : {
1099 610 : dcc = typentry->domainData;
1100 610 : typentry->domainData = NULL;
1101 610 : decr_dcc_refcount(dcc);
1102 : }
1103 :
1104 : /*
1105 : * We try to optimize the common case of no domain constraints, so don't
1106 : * create the dcc object and context until we find a constraint. Likewise
1107 : * for the temp sorting array.
1108 : */
1109 2588 : dcc = NULL;
1110 2588 : ccons = NULL;
1111 2588 : cconslen = 0;
1112 :
1113 : /*
1114 : * Scan pg_constraint for relevant constraints. We want to find
1115 : * constraints for not just this domain, but any ancestor domains, so the
1116 : * outer loop crawls up the domain stack.
1117 : */
1118 2588 : conRel = table_open(ConstraintRelationId, AccessShareLock);
1119 :
1120 : for (;;)
1121 2636 : {
1122 : HeapTuple tup;
1123 : HeapTuple conTup;
1124 : Form_pg_type typTup;
1125 5224 : int nccons = 0;
1126 : ScanKeyData key[1];
1127 : SysScanDesc scan;
1128 :
1129 5224 : tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
1130 5224 : if (!HeapTupleIsValid(tup))
1131 0 : elog(ERROR, "cache lookup failed for type %u", typeOid);
1132 5224 : typTup = (Form_pg_type) GETSTRUCT(tup);
1133 :
1134 5224 : if (typTup->typtype != TYPTYPE_DOMAIN)
1135 : {
1136 : /* Not a domain, so done */
1137 2588 : ReleaseSysCache(tup);
1138 2588 : break;
1139 : }
1140 :
1141 : /* Test for NOT NULL Constraint */
1142 2636 : if (typTup->typnotnull)
1143 154 : notNull = true;
1144 :
1145 : /* Look for CHECK Constraints on this domain */
1146 2636 : ScanKeyInit(&key[0],
1147 : Anum_pg_constraint_contypid,
1148 : BTEqualStrategyNumber, F_OIDEQ,
1149 : ObjectIdGetDatum(typeOid));
1150 :
1151 2636 : scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
1152 : NULL, 1, key);
1153 :
1154 3978 : while (HeapTupleIsValid(conTup = systable_getnext(scan)))
1155 : {
1156 1342 : Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
1157 : Datum val;
1158 : bool isNull;
1159 : char *constring;
1160 : Expr *check_expr;
1161 : DomainConstraintState *r;
1162 :
1163 : /* Ignore non-CHECK constraints */
1164 1342 : if (c->contype != CONSTRAINT_CHECK)
1165 154 : continue;
1166 :
1167 : /* Not expecting conbin to be NULL, but we'll test for it anyway */
1168 1188 : val = fastgetattr(conTup, Anum_pg_constraint_conbin,
1169 : conRel->rd_att, &isNull);
1170 1188 : if (isNull)
1171 0 : elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
1172 : NameStr(typTup->typname), NameStr(c->conname));
1173 :
1174 : /* Convert conbin to C string in caller context */
1175 1188 : constring = TextDatumGetCString(val);
1176 :
1177 : /* Create the DomainConstraintCache object and context if needed */
1178 1188 : if (dcc == NULL)
1179 : {
1180 : MemoryContext cxt;
1181 :
1182 1154 : cxt = AllocSetContextCreate(CurrentMemoryContext,
1183 : "Domain constraints",
1184 : ALLOCSET_SMALL_SIZES);
1185 : dcc = (DomainConstraintCache *)
1186 1154 : MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
1187 1154 : dcc->constraints = NIL;
1188 1154 : dcc->dccContext = cxt;
1189 1154 : dcc->dccRefCount = 0;
1190 : }
1191 :
1192 : /* Create node trees in DomainConstraintCache's context */
1193 1188 : oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1194 :
1195 1188 : check_expr = (Expr *) stringToNode(constring);
1196 :
1197 : /*
1198 : * Plan the expression, since ExecInitExpr will expect that.
1199 : *
1200 : * Note: caching the result of expression_planner() is not very
1201 : * good practice. Ideally we'd use a CachedExpression here so
1202 : * that we would react promptly to, eg, changes in inlined
1203 : * functions. However, because we don't support mutable domain
1204 : * CHECK constraints, it's not really clear that it's worth the
1205 : * extra overhead to do that.
1206 : */
1207 1188 : check_expr = expression_planner(check_expr);
1208 :
1209 1188 : r = makeNode(DomainConstraintState);
1210 1188 : r->constrainttype = DOM_CONSTRAINT_CHECK;
1211 1188 : r->name = pstrdup(NameStr(c->conname));
1212 1188 : r->check_expr = check_expr;
1213 1188 : r->check_exprstate = NULL;
1214 :
1215 1188 : MemoryContextSwitchTo(oldcxt);
1216 :
1217 : /* Accumulate constraints in an array, for sorting below */
1218 1188 : if (ccons == NULL)
1219 : {
1220 1154 : cconslen = 8;
1221 : ccons = (DomainConstraintState **)
1222 1154 : palloc(cconslen * sizeof(DomainConstraintState *));
1223 : }
1224 34 : else if (nccons >= cconslen)
1225 : {
1226 0 : cconslen *= 2;
1227 : ccons = (DomainConstraintState **)
1228 0 : repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
1229 : }
1230 1188 : ccons[nccons++] = r;
1231 : }
1232 :
1233 2636 : systable_endscan(scan);
1234 :
1235 2636 : if (nccons > 0)
1236 : {
1237 : /*
1238 : * Sort the items for this domain, so that CHECKs are applied in a
1239 : * deterministic order.
1240 : */
1241 1178 : if (nccons > 1)
1242 8 : qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);
1243 :
1244 : /*
1245 : * Now attach them to the overall list. Use lcons() here because
1246 : * constraints of parent domains should be applied earlier.
1247 : */
1248 1178 : oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1249 2366 : while (nccons > 0)
1250 1188 : dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
1251 1178 : MemoryContextSwitchTo(oldcxt);
1252 : }
1253 :
1254 : /* loop to next domain in stack */
1255 2636 : typeOid = typTup->typbasetype;
1256 2636 : ReleaseSysCache(tup);
1257 : }
1258 :
1259 2588 : table_close(conRel, AccessShareLock);
1260 :
1261 : /*
1262 : * Only need to add one NOT NULL check regardless of how many domains in
1263 : * the stack request it.
1264 : */
1265 2588 : if (notNull)
1266 : {
1267 : DomainConstraintState *r;
1268 :
1269 : /* Create the DomainConstraintCache object and context if needed */
1270 154 : if (dcc == NULL)
1271 : {
1272 : MemoryContext cxt;
1273 :
1274 116 : cxt = AllocSetContextCreate(CurrentMemoryContext,
1275 : "Domain constraints",
1276 : ALLOCSET_SMALL_SIZES);
1277 : dcc = (DomainConstraintCache *)
1278 116 : MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
1279 116 : dcc->constraints = NIL;
1280 116 : dcc->dccContext = cxt;
1281 116 : dcc->dccRefCount = 0;
1282 : }
1283 :
1284 : /* Create node trees in DomainConstraintCache's context */
1285 154 : oldcxt = MemoryContextSwitchTo(dcc->dccContext);
1286 :
1287 154 : r = makeNode(DomainConstraintState);
1288 :
1289 154 : r->constrainttype = DOM_CONSTRAINT_NOTNULL;
1290 154 : r->name = pstrdup("NOT NULL");
1291 154 : r->check_expr = NULL;
1292 154 : r->check_exprstate = NULL;
1293 :
1294 : /* lcons to apply the nullness check FIRST */
1295 154 : dcc->constraints = lcons(r, dcc->constraints);
1296 :
1297 154 : MemoryContextSwitchTo(oldcxt);
1298 : }
1299 :
1300 : /*
1301 : * If we made a constraint object, move it into CacheMemoryContext and
1302 : * attach it to the typcache entry.
1303 : */
1304 2588 : if (dcc)
1305 : {
1306 1270 : MemoryContextSetParent(dcc->dccContext, CacheMemoryContext);
1307 1270 : typentry->domainData = dcc;
1308 1270 : dcc->dccRefCount++; /* count the typcache's reference */
1309 : }
1310 :
1311 : /* Either way, the typcache entry's domain data is now valid. */
1312 2588 : typentry->flags |= TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
1313 2588 : }
1314 :
1315 : /*
1316 : * qsort comparator to sort DomainConstraintState pointers by name
1317 : */
1318 : static int
1319 10 : dcs_cmp(const void *a, const void *b)
1320 : {
1321 10 : const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1322 10 : const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1323 :
1324 10 : return strcmp((*ca)->name, (*cb)->name);
1325 : }
1326 :
1327 : /*
1328 : * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
1329 : * and free it if no references remain
1330 : */
1331 : static void
1332 12636 : decr_dcc_refcount(DomainConstraintCache *dcc)
1333 : {
1334 : Assert(dcc->dccRefCount > 0);
1335 12636 : if (--(dcc->dccRefCount) <= 0)
1336 606 : MemoryContextDelete(dcc->dccContext);
1337 12636 : }
1338 :
1339 : /*
1340 : * Context reset/delete callback for a DomainConstraintRef
1341 : */
1342 : static void
1343 12720 : dccref_deletion_callback(void *arg)
1344 : {
1345 12720 : DomainConstraintRef *ref = (DomainConstraintRef *) arg;
1346 12720 : DomainConstraintCache *dcc = ref->dcc;
1347 :
1348 : /* Paranoia --- be sure link is nulled before trying to release */
1349 12720 : if (dcc)
1350 : {
1351 12026 : ref->constraints = NIL;
1352 12026 : ref->dcc = NULL;
1353 12026 : decr_dcc_refcount(dcc);
1354 : }
1355 12720 : }
1356 :
1357 : /*
1358 : * prep_domain_constraints --- prepare domain constraints for execution
1359 : *
1360 : * The expression trees stored in the DomainConstraintCache's list are
1361 : * converted to executable expression state trees stored in execctx.
1362 : */
1363 : static List *
1364 2560 : prep_domain_constraints(List *constraints, MemoryContext execctx)
1365 : {
1366 2560 : List *result = NIL;
1367 : MemoryContext oldcxt;
1368 : ListCell *lc;
1369 :
1370 2560 : oldcxt = MemoryContextSwitchTo(execctx);
1371 :
1372 5144 : foreach(lc, constraints)
1373 : {
1374 2584 : DomainConstraintState *r = (DomainConstraintState *) lfirst(lc);
1375 : DomainConstraintState *newr;
1376 :
1377 2584 : newr = makeNode(DomainConstraintState);
1378 2584 : newr->constrainttype = r->constrainttype;
1379 2584 : newr->name = r->name;
1380 2584 : newr->check_expr = r->check_expr;
1381 2584 : newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1382 :
1383 2584 : result = lappend(result, newr);
1384 : }
1385 :
1386 2560 : MemoryContextSwitchTo(oldcxt);
1387 :
1388 2560 : return result;
1389 : }
1390 :
1391 : /*
1392 : * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
1393 : *
1394 : * Caller must tell us the MemoryContext in which the DomainConstraintRef
1395 : * lives. The ref will be cleaned up when that context is reset/deleted.
1396 : *
1397 : * Caller must also tell us whether it wants check_exprstate fields to be
1398 : * computed in the DomainConstraintState nodes attached to this ref.
1399 : * If it doesn't, we need not make a copy of the DomainConstraintState list.
1400 : */
1401 : void
1402 12748 : InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref,
1403 : MemoryContext refctx, bool need_exprstate)
1404 : {
1405 : /* Look up the typcache entry --- we assume it survives indefinitely */
1406 12748 : ref->tcache = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
1407 12748 : ref->need_exprstate = need_exprstate;
1408 : /* For safety, establish the callback before acquiring a refcount */
1409 12748 : ref->refctx = refctx;
1410 12748 : ref->dcc = NULL;
1411 12748 : ref->callback.func = dccref_deletion_callback;
1412 12748 : ref->callback.arg = ref;
1413 12748 : MemoryContextRegisterResetCallback(refctx, &ref->callback);
1414 : /* Acquire refcount if there are constraints, and set up exported list */
1415 12748 : if (ref->tcache->domainData)
1416 : {
1417 12054 : ref->dcc = ref->tcache->domainData;
1418 12054 : ref->dcc->dccRefCount++;
1419 12054 : if (ref->need_exprstate)
1420 2560 : ref->constraints = prep_domain_constraints(ref->dcc->constraints,
1421 : ref->refctx);
1422 : else
1423 9494 : ref->constraints = ref->dcc->constraints;
1424 : }
1425 : else
1426 694 : ref->constraints = NIL;
1427 12748 : }
1428 :
1429 : /*
1430 : * UpdateDomainConstraintRef --- recheck validity of domain constraint info
1431 : *
1432 : * If the domain's constraint set changed, ref->constraints is updated to
1433 : * point at a new list of cached constraints.
1434 : *
1435 : * In the normal case where nothing happened to the domain, this is cheap
1436 : * enough that it's reasonable (and expected) to check before *each* use
1437 : * of the constraint info.
1438 : */
1439 : void
1440 429540 : UpdateDomainConstraintRef(DomainConstraintRef *ref)
1441 : {
1442 429540 : TypeCacheEntry *typentry = ref->tcache;
1443 :
1444 : /* Make sure typcache entry's data is up to date */
1445 429540 : if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
1446 0 : typentry->typtype == TYPTYPE_DOMAIN)
1447 0 : load_domaintype_info(typentry);
1448 :
1449 : /* Transfer to ref object if there's new info, adjusting refcounts */
1450 429540 : if (ref->dcc != typentry->domainData)
1451 : {
1452 : /* Paranoia --- be sure link is nulled before trying to release */
1453 0 : DomainConstraintCache *dcc = ref->dcc;
1454 :
1455 0 : if (dcc)
1456 : {
1457 : /*
1458 : * Note: we just leak the previous list of executable domain
1459 : * constraints. Alternatively, we could keep those in a child
1460 : * context of ref->refctx and free that context at this point.
1461 : * However, in practice this code path will be taken so seldom
1462 : * that the extra bookkeeping for a child context doesn't seem
1463 : * worthwhile; we'll just allow a leak for the lifespan of refctx.
1464 : */
1465 0 : ref->constraints = NIL;
1466 0 : ref->dcc = NULL;
1467 0 : decr_dcc_refcount(dcc);
1468 : }
1469 0 : dcc = typentry->domainData;
1470 0 : if (dcc)
1471 : {
1472 0 : ref->dcc = dcc;
1473 0 : dcc->dccRefCount++;
1474 0 : if (ref->need_exprstate)
1475 0 : ref->constraints = prep_domain_constraints(dcc->constraints,
1476 : ref->refctx);
1477 : else
1478 0 : ref->constraints = dcc->constraints;
1479 : }
1480 : }
1481 429540 : }
1482 :
1483 : /*
1484 : * DomainHasConstraints --- utility routine to check if a domain has constraints
1485 : *
1486 : * This is defined to return false, not fail, if type is not a domain.
1487 : */
1488 : bool
1489 29644 : DomainHasConstraints(Oid type_id)
1490 : {
1491 : TypeCacheEntry *typentry;
1492 :
1493 : /*
1494 : * Note: a side effect is to cause the typcache's domain data to become
1495 : * valid. This is fine since we'll likely need it soon if there is any.
1496 : */
1497 29644 : typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
1498 :
1499 29644 : return (typentry->domainData != NULL);
1500 : }
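
/*
 * A minimal sketch of typical use, as a cheap guard that lets callers skip
 * per-value constraint machinery entirely (hypothetical names):
 *
 *		if (DomainHasConstraints(targettypid))
 *		{
 *			// set up a DomainConstraintRef and check each incoming value
 *		}
 *		else
 *		{
 *			// values of the domain need no runtime checking
 *		}
 */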
1501 :
1502 :
1503 : /*
1504 : * array_element_has_equality and friends are helper routines to check
1505 : * whether we should believe that array_eq and related functions will work
1506 : * on the given array type or composite type.
1507 : *
1508 : * The logic above may call these repeatedly on the same type entry, so we
1509 : * make use of the typentry->flags field to cache the results once known.
1510 : * Also, we assume that we'll probably want all these facts about the type
1511 : * if we want any, so we cache them all using only one lookup of the
1512 : * component datatype(s).
1513 : */
1514 :
1515 : static bool
1516 2978 : array_element_has_equality(TypeCacheEntry *typentry)
1517 : {
1518 2978 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1519 2488 : cache_array_element_properties(typentry);
1520 2978 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1521 : }
1522 :
1523 : static bool
1524 5434 : array_element_has_compare(TypeCacheEntry *typentry)
1525 : {
1526 5434 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1527 542 : cache_array_element_properties(typentry);
1528 5434 : return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1529 : }
1530 :
1531 : static bool
1532 2092 : array_element_has_hashing(TypeCacheEntry *typentry)
1533 : {
1534 2092 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1535 0 : cache_array_element_properties(typentry);
1536 2092 : return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1537 : }
1538 :
1539 : static bool
1540 360 : array_element_has_extended_hashing(TypeCacheEntry *typentry)
1541 : {
1542 360 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1543 0 : cache_array_element_properties(typentry);
1544 360 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1545 : }
1546 :
1547 : static void
1548 3030 : cache_array_element_properties(TypeCacheEntry *typentry)
1549 : {
1550 3030 : Oid elem_type = get_base_element_type(typentry->type_id);
1551 :
1552 3030 : if (OidIsValid(elem_type))
1553 : {
1554 : TypeCacheEntry *elementry;
1555 :
1556 2836 : elementry = lookup_type_cache(elem_type,
1557 : TYPECACHE_EQ_OPR |
1558 : TYPECACHE_CMP_PROC |
1559 : TYPECACHE_HASH_PROC |
1560 : TYPECACHE_HASH_EXTENDED_PROC);
1561 2836 : if (OidIsValid(elementry->eq_opr))
1562 2662 : typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1563 2836 : if (OidIsValid(elementry->cmp_proc))
1564 2472 : typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1565 2836 : if (OidIsValid(elementry->hash_proc))
1566 2650 : typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1567 2836 : if (OidIsValid(elementry->hash_extended_proc))
1568 2650 : typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1569 : }
1570 3030 : typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1571 3030 : }
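
/*
 * Worked example (editorial, not from the source): for int4[] the element
 * type int4 has a default btree opclass (supplying eq_opr and cmp_proc) and
 * a default hash opclass (supplying hash_proc and its extended variant), so
 * all four TCFLAGS_HAVE_ELEM_* bits get set and array_eq, array_cmp and
 * hash_array are all usable.  For point[] the element type has no default
 * btree or hash opclass, so none of the bits are set and lookup_type_cache
 * will report no eq_opr, cmp_proc or hash_proc for the array type.
 */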
1572 :
1573 : /*
1574 : * Likewise, some helper functions for composite types.
1575 : */
1576 :
1577 : static bool
1578 436 : record_fields_have_equality(TypeCacheEntry *typentry)
1579 : {
1580 436 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1581 408 : cache_record_field_properties(typentry);
1582 436 : return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1583 : }
1584 :
1585 : static bool
1586 512 : record_fields_have_compare(TypeCacheEntry *typentry)
1587 : {
1588 512 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1589 60 : cache_record_field_properties(typentry);
1590 512 : return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1591 : }
1592 :
1593 : static bool
1594 420 : record_fields_have_hashing(TypeCacheEntry *typentry)
1595 : {
1596 420 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1597 6 : cache_record_field_properties(typentry);
1598 420 : return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
1599 : }
1600 :
1601 : static bool
1602 188 : record_fields_have_extended_hashing(TypeCacheEntry *typentry)
1603 : {
1604 188 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1605 0 : cache_record_field_properties(typentry);
1606 188 : return (typentry->flags & TCFLAGS_HAVE_FIELD_EXTENDED_HASHING) != 0;
1607 : }
1608 :
1609 : static void
1610 474 : cache_record_field_properties(TypeCacheEntry *typentry)
1611 : {
1612 : /*
1613 : * For type RECORD, we can't really tell what will work, since we don't
1614 : * have access here to the specific anonymous type. Just assume that
1615 : * equality and comparison will (we may get a failure at runtime). We
1616 : * could also claim that hashing works, but then if code that has the
1617 : * option between a comparison-based (sort-based) and a hash-based plan
1618 : * chooses hashing, stuff could fail that would otherwise work if it chose
1619 : * a comparison-based plan. In practice more types support comparison
1620 : * than hashing.
1621 : */
1622 474 : if (typentry->type_id == RECORDOID)
1623 : {
1624 50 : typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
1625 : TCFLAGS_HAVE_FIELD_COMPARE);
1626 : }
1627 424 : else if (typentry->typtype == TYPTYPE_COMPOSITE)
1628 : {
1629 : TupleDesc tupdesc;
1630 : int newflags;
1631 : int i;
1632 :
1633 : /* Fetch composite type's tupdesc if we don't have it already */
1634 424 : if (typentry->tupDesc == NULL)
1635 238 : load_typcache_tupdesc(typentry);
1636 424 : tupdesc = typentry->tupDesc;
1637 :
1638 : /* Must bump the refcount while we do additional catalog lookups */
1639 424 : IncrTupleDescRefCount(tupdesc);
1640 :
1641 : /* Have each property if all non-dropped fields have the property */
1642 424 : newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
1643 : TCFLAGS_HAVE_FIELD_COMPARE |
1644 : TCFLAGS_HAVE_FIELD_HASHING |
1645 : TCFLAGS_HAVE_FIELD_EXTENDED_HASHING);
1646 5528 : for (i = 0; i < tupdesc->natts; i++)
1647 : {
1648 : TypeCacheEntry *fieldentry;
1649 5296 : Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
1650 :
1651 5296 : if (attr->attisdropped)
1652 0 : continue;
1653 :
1654 5296 : fieldentry = lookup_type_cache(attr->atttypid,
1655 : TYPECACHE_EQ_OPR |
1656 : TYPECACHE_CMP_PROC |
1657 : TYPECACHE_HASH_PROC |
1658 : TYPECACHE_HASH_EXTENDED_PROC);
1659 5296 : if (!OidIsValid(fieldentry->eq_opr))
1660 192 : newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
1661 5296 : if (!OidIsValid(fieldentry->cmp_proc))
1662 192 : newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
1663 5296 : if (!OidIsValid(fieldentry->hash_proc))
1664 198 : newflags &= ~TCFLAGS_HAVE_FIELD_HASHING;
1665 5296 : if (!OidIsValid(fieldentry->hash_extended_proc))
1666 198 : newflags &= ~TCFLAGS_HAVE_FIELD_EXTENDED_HASHING;
1667 :
1668 : /* We can drop out of the loop once we disprove all bits */
1669 5296 : if (newflags == 0)
1670 192 : break;
1671 : }
1672 424 : typentry->flags |= newflags;
1673 :
1674 424 : DecrTupleDescRefCount(tupdesc);
1675 : }
1676 0 : else if (typentry->typtype == TYPTYPE_DOMAIN)
1677 : {
1678 : /* If it's domain over composite, copy base type's properties */
1679 : TypeCacheEntry *baseentry;
1680 :
1681 : /* load up basetype info if we didn't already */
1682 0 : if (typentry->domainBaseType == InvalidOid)
1683 : {
1684 0 : typentry->domainBaseTypmod = -1;
1685 0 : typentry->domainBaseType =
1686 0 : getBaseTypeAndTypmod(typentry->type_id,
1687 : &typentry->domainBaseTypmod);
1688 : }
1689 0 : baseentry = lookup_type_cache(typentry->domainBaseType,
1690 : TYPECACHE_EQ_OPR |
1691 : TYPECACHE_CMP_PROC |
1692 : TYPECACHE_HASH_PROC |
1693 : TYPECACHE_HASH_EXTENDED_PROC);
1694 0 : if (baseentry->typtype == TYPTYPE_COMPOSITE)
1695 : {
1696 0 : typentry->flags |= TCFLAGS_DOMAIN_BASE_IS_COMPOSITE;
1697 0 : typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
1698 : TCFLAGS_HAVE_FIELD_COMPARE |
1699 : TCFLAGS_HAVE_FIELD_HASHING |
1700 : TCFLAGS_HAVE_FIELD_EXTENDED_HASHING);
1701 : }
1702 : }
1703 474 : typentry->flags |= TCFLAGS_CHECKED_FIELD_PROPERTIES;
1704 474 : }
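
/*
 * Worked example (editorial): for CREATE TYPE t AS (a int4, b text), every
 * non-dropped field type has default btree and hash opclasses, so all four
 * TCFLAGS_HAVE_FIELD_* bits survive the loop and record comparison and
 * hashing are usable for t.  Add a column of type point, which has neither
 * opclass, and all four bits are disproved on that field; the loop then
 * breaks early, and lookup_type_cache will report no eq_opr, cmp_proc or
 * hash_proc for t.
 */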
1705 :
1706 : /*
1707 : * Likewise, some helper functions for range and multirange types.
1708 : *
1709 : * We can borrow the flag bits for array element properties to use for range
1710 : * element properties, since those flag bits otherwise have no use in a
1711 : * range or multirange type's typcache entry.
1712 : */
1713 :
1714 : static bool
1715 120 : range_element_has_hashing(TypeCacheEntry *typentry)
1716 : {
1717 120 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1718 120 : cache_range_element_properties(typentry);
1719 120 : return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1720 : }
1721 :
1722 : static bool
1723 0 : range_element_has_extended_hashing(TypeCacheEntry *typentry)
1724 : {
1725 0 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1726 0 : cache_range_element_properties(typentry);
1727 0 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1728 : }
1729 :
1730 : static void
1731 120 : cache_range_element_properties(TypeCacheEntry *typentry)
1732 : {
1733 : /* load up subtype link if we didn't already */
1734 120 : if (typentry->rngelemtype == NULL &&
1735 88 : typentry->typtype == TYPTYPE_RANGE)
1736 88 : load_rangetype_info(typentry);
1737 :
1738 120 : if (typentry->rngelemtype != NULL)
1739 : {
1740 : TypeCacheEntry *elementry;
1741 :
1742 : /* might need to calculate subtype's hash function properties */
1743 120 : elementry = lookup_type_cache(typentry->rngelemtype->type_id,
1744 : TYPECACHE_HASH_PROC |
1745 : TYPECACHE_HASH_EXTENDED_PROC);
1746 120 : if (OidIsValid(elementry->hash_proc))
1747 114 : typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1748 120 : if (OidIsValid(elementry->hash_extended_proc))
1749 114 : typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1750 : }
1751 120 : typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1752 120 : }
1753 :
1754 : static bool
1755 18 : multirange_element_has_hashing(TypeCacheEntry *typentry)
1756 : {
1757 18 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1758 18 : cache_multirange_element_properties(typentry);
1759 18 : return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1760 : }
1761 :
1762 : static bool
1763 0 : multirange_element_has_extended_hashing(TypeCacheEntry *typentry)
1764 : {
1765 0 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1766 0 : cache_multirange_element_properties(typentry);
1767 0 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1768 : }
1769 :
1770 : static void
1771 18 : cache_multirange_element_properties(TypeCacheEntry *typentry)
1772 : {
1773 : /* load up range link if we didn't already */
1774 18 : if (typentry->rngtype == NULL &&
1775 0 : typentry->typtype == TYPTYPE_MULTIRANGE)
1776 0 : load_multirangetype_info(typentry);
1777 :
1778 18 : if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
1779 : {
1780 : TypeCacheEntry *elementry;
1781 :
1782 : /* might need to calculate subtype's hash function properties */
1783 18 : elementry = lookup_type_cache(typentry->rngtype->rngelemtype->type_id,
1784 : TYPECACHE_HASH_PROC |
1785 : TYPECACHE_HASH_EXTENDED_PROC);
1786 18 : if (OidIsValid(elementry->hash_proc))
1787 12 : typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1788 18 : if (OidIsValid(elementry->hash_extended_proc))
1789 12 : typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1790 : }
1791 18 : typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1792 18 : }
1793 :
1794 : /*
1795 : * Make sure that RecordCacheArray is large enough to store 'typmod'.
1796 : * (Each RecordCacheArrayEntry holds both the tupdesc and its identifier.)
1797 : */
1798 : static void
1799 16580 : ensure_record_cache_typmod_slot_exists(int32 typmod)
1800 : {
1801 16580 : if (RecordCacheArray == NULL)
1802 : {
1803 6402 : RecordCacheArray = (RecordCacheArrayEntry *)
1804 6402 : MemoryContextAllocZero(CacheMemoryContext,
1805 : 64 * sizeof(RecordCacheArrayEntry));
1806 6402 : RecordCacheArrayLen = 64;
1807 : }
1808 :
1809 16580 : if (typmod >= RecordCacheArrayLen)
1810 : {
1811 0 : int32 newlen = pg_nextpower2_32(typmod + 1);
1812 :
1813 0 : RecordCacheArray = repalloc0_array(RecordCacheArray,
1814 : RecordCacheArrayEntry,
1815 : RecordCacheArrayLen,
1816 : newlen);
1817 0 : RecordCacheArrayLen = newlen;
1818 : }
1819 16580 : }
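
/*
 * Worked example of the growth arithmetic above (editorial): with
 * RecordCacheArrayLen = 64, the first request for typmod 64 computes
 * newlen = pg_nextpower2_32(65) = 128, doubling the array; a request for
 * typmod 1000 would instead jump straight to pg_nextpower2_32(1001) = 1024.
 */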
1820 :
1821 : /*
1822 : * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1823 : *
1824 : * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1825 : * hasn't had its refcount bumped.
1826 : */
1827 : static TupleDesc
1828 124124 : lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
1829 : {
1830 124124 : if (type_id != RECORDOID)
1831 : {
1832 : /*
1833 : * It's a named composite type, so use the regular typcache.
1834 : */
1835 : TypeCacheEntry *typentry;
1836 :
1837 58084 : typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1838 58082 : if (typentry->tupDesc == NULL && !noError)
1839 0 : ereport(ERROR,
1840 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1841 : errmsg("type %s is not composite",
1842 : format_type_be(type_id))));
1843 58082 : return typentry->tupDesc;
1844 : }
1845 : else
1846 : {
1847 : /*
1848 : * It's a transient record type, so look in our record-type table.
1849 : */
1850 66040 : if (typmod >= 0)
1851 : {
1852 : /* Is it already in our local cache? */
1853 66024 : if (typmod < RecordCacheArrayLen &&
1854 66018 : RecordCacheArray[typmod].tupdesc != NULL)
1855 65994 : return RecordCacheArray[typmod].tupdesc;
1856 :
1857 : /* Are we attached to a shared record typmod registry? */
1858 30 : if (CurrentSession->shared_typmod_registry != NULL)
1859 : {
1860 : SharedTypmodTableEntry *entry;
1861 :
1862 : /* Try to find it in the shared typmod index. */
1863 30 : entry = dshash_find(CurrentSession->shared_typmod_table,
1864 : &typmod, false);
1865 30 : if (entry != NULL)
1866 : {
1867 : TupleDesc tupdesc;
1868 :
1869 : tupdesc = (TupleDesc)
1870 30 : dsa_get_address(CurrentSession->area,
1871 : entry->shared_tupdesc);
1872 : Assert(typmod == tupdesc->tdtypmod);
1873 :
1874 : /* We may need to extend the local RecordCacheArray. */
1875 30 : ensure_record_cache_typmod_slot_exists(typmod);
1876 :
1877 : /*
1878 : * Our local array can now point directly to the TupleDesc
1879 : * in shared memory, which is non-reference-counted.
1880 : */
1881 30 : RecordCacheArray[typmod].tupdesc = tupdesc;
1882 : Assert(tupdesc->tdrefcount == -1);
1883 :
1884 : /*
1885 : * We don't share tupdesc identifiers across processes, so
1886 : * assign one locally.
1887 : */
1888 30 : RecordCacheArray[typmod].id = ++tupledesc_id_counter;
1889 :
1890 30 : dshash_release_lock(CurrentSession->shared_typmod_table,
1891 : entry);
1892 :
1893 30 : return RecordCacheArray[typmod].tupdesc;
1894 : }
1895 : }
1896 : }
1897 :
1898 16 : if (!noError)
1899 0 : ereport(ERROR,
1900 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1901 : errmsg("record type has not been registered")));
1902 16 : return NULL;
1903 : }
1904 : }
1905 :
1906 : /*
1907 : * lookup_rowtype_tupdesc
1908 : *
1909 : * Given a typeid/typmod that should describe a known composite type,
1910 : * return the tuple descriptor for the type. Will ereport on failure.
1911 : * (Use ereport because this is reachable with user-specified OIDs,
1912 : * for example from record_in().)
1913 : *
1914 : * Note: on success, we increment the refcount of the returned TupleDesc,
1915 : * and log the reference in CurrentResourceOwner. Caller must call
1916 : * ReleaseTupleDesc when done using the tupdesc. (There are some
1917 : * cases in which the returned tupdesc is not refcounted, in which
1918 : * case PinTupleDesc/ReleaseTupleDesc are no-ops; but in these cases
1919 : * the tupdesc is guaranteed to live till process exit.)
1920 : */
1921 : TupleDesc
1922 73254 : lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
1923 : {
1924 : TupleDesc tupDesc;
1925 :
1926 73254 : tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1927 73252 : PinTupleDesc(tupDesc);
1928 73252 : return tupDesc;
1929 : }
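
/*
 * A hedged usage sketch (caller code, names illustrative):
 *
 *		TupleDesc	tupdesc;
 *
 *		tupdesc = lookup_rowtype_tupdesc(typid, typmod);
 *		// ... examine tupdesc->natts, TupleDescAttr(tupdesc, i), etc. ...
 *		ReleaseTupleDesc(tupdesc);
 *
 * ReleaseTupleDesc is a no-op for the non-refcounted descriptors mentioned
 * above, so callers need not distinguish the two cases.
 */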
1930 :
1931 : /*
1932 : * lookup_rowtype_tupdesc_noerror
1933 : *
1934 : * As above, but if the type is not a known composite type and noError
1935 : * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1936 : * type_id is passed, you'll get an ereport anyway.)
1937 : */
1938 : TupleDesc
1939 20 : lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1940 : {
1941 : TupleDesc tupDesc;
1942 :
1943 20 : tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1944 20 : if (tupDesc != NULL)
1945 20 : PinTupleDesc(tupDesc);
1946 20 : return tupDesc;
1947 : }
1948 :
1949 : /*
1950 : * lookup_rowtype_tupdesc_copy
1951 : *
1952 : * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1953 : * copied into the CurrentMemoryContext and is not reference-counted.
1954 : */
1955 : TupleDesc
1956 50832 : lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
1957 : {
1958 : TupleDesc tmp;
1959 :
1960 50832 : tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1961 50832 : return CreateTupleDescCopyConstr(tmp);
1962 : }
1963 :
1964 : /*
1965 : * lookup_rowtype_tupdesc_domain
1966 : *
1967 : * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
1968 : * a domain over a named composite type; so this is effectively equivalent to
1969 : * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
1970 : * except for being a tad faster.
1971 : *
1972 : * Note: the reason we don't fold the look-through-domain behavior into plain
1973 : * lookup_rowtype_tupdesc() is that we want callers to know they might be
1974 : * dealing with a domain. Otherwise they might construct a tuple that should
1975 : * be of the domain type, but not apply domain constraints.
1976 : */
1977 : TupleDesc
1978 2664 : lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
1979 : {
1980 : TupleDesc tupDesc;
1981 :
1982 2664 : if (type_id != RECORDOID)
1983 : {
1984 : /*
1985 : * Check for domain or named composite type. We might as well load
1986 : * whichever data is needed.
1987 : */
1988 : TypeCacheEntry *typentry;
1989 :
1990 2646 : typentry = lookup_type_cache(type_id,
1991 : TYPECACHE_TUPDESC |
1992 : TYPECACHE_DOMAIN_BASE_INFO);
1993 2646 : if (typentry->typtype == TYPTYPE_DOMAIN)
1994 20 : return lookup_rowtype_tupdesc_noerror(typentry->domainBaseType,
1995 : typentry->domainBaseTypmod,
1996 : noError);
1997 2626 : if (typentry->tupDesc == NULL && !noError)
1998 0 : ereport(ERROR,
1999 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2000 : errmsg("type %s is not composite",
2001 : format_type_be(type_id))));
2002 2626 : tupDesc = typentry->tupDesc;
2003 : }
2004 : else
2005 18 : tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
2006 2644 : if (tupDesc != NULL)
2007 2628 : PinTupleDesc(tupDesc);
2008 2644 : return tupDesc;
2009 : }
2010 :
2011 : /*
2012 : * Hash function for the hash table of RecordCacheEntry.
2013 : */
2014 : static uint32
2015 375042 : record_type_typmod_hash(const void *data, size_t size)
2016 : {
2017 375042 : RecordCacheEntry *entry = (RecordCacheEntry *) data;
2018 :
2019 375042 : return hashRowType(entry->tupdesc);
2020 : }
2021 :
2022 : /*
2023 : * Match function for the hash table of RecordCacheEntry.
2024 : */
2025 : static int
2026 345628 : record_type_typmod_compare(const void *a, const void *b, size_t size)
2027 : {
2028 345628 : RecordCacheEntry *left = (RecordCacheEntry *) a;
2029 345628 : RecordCacheEntry *right = (RecordCacheEntry *) b;
2030 :
2031 345628 : return equalRowTypes(left->tupdesc, right->tupdesc) ? 0 : 1;
2032 : }
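
/*
 * Editorial note: dynahash requires that keys which the match function
 * deems equal also produce equal hash values.  That holds here because
 * hashRowType hashes a subset of the fields that equalRowTypes compares,
 * so tupdescs equal under equalRowTypes necessarily hash alike.
 */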
2033 :
2034 : /*
2035 : * assign_record_type_typmod
2036 : *
2037 : * Given a tuple descriptor for a RECORD type, find or create a cache entry
2038 : * for the type, and set the tupdesc's tdtypmod field to a value that will
2039 : * identify this cache entry to lookup_rowtype_tupdesc.
2040 : */
2041 : void
2042 358492 : assign_record_type_typmod(TupleDesc tupDesc)
2043 : {
2044 : RecordCacheEntry *recentry;
2045 : TupleDesc entDesc;
2046 : bool found;
2047 : MemoryContext oldcxt;
2048 :
2049 : Assert(tupDesc->tdtypeid == RECORDOID);
2050 :
2051 358492 : if (RecordCacheHash == NULL)
2052 : {
2053 : /* First time through: initialize the hash table */
2054 : HASHCTL ctl;
2055 :
2056 6402 : ctl.keysize = sizeof(TupleDesc); /* just the pointer */
2057 6402 : ctl.entrysize = sizeof(RecordCacheEntry);
2058 6402 : ctl.hash = record_type_typmod_hash;
2059 6402 : ctl.match = record_type_typmod_compare;
2060 6402 : RecordCacheHash = hash_create("Record information cache", 64,
2061 : &ctl,
2062 : HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);
2063 :
2064 : /* Also make sure CacheMemoryContext exists */
2065 6402 : if (!CacheMemoryContext)
2066 0 : CreateCacheMemoryContext();
2067 : }
2068 :
2069 : /*
2070 : * Find a hashtable entry for this tuple descriptor. We don't use
2071 : * HASH_ENTER yet, because if it's missing, we need to make sure that all
2072 : * the allocations succeed before we create the new entry.
2073 : */
2074 358492 : recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
2075 : &tupDesc,
2076 : HASH_FIND, &found);
2077 358492 : if (found && recentry->tupdesc != NULL)
2078 : {
2079 341942 : tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
2080 341942 : return;
2081 : }
2082 :
2083 : /* Not present, so need to manufacture an entry */
2084 16550 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2085 :
2086 : /* Look in the SharedRecordTypmodRegistry, if attached */
2087 16550 : entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
2088 16550 : if (entDesc == NULL)
2089 : {
2090 : /*
2091 : * Make sure we have room before we CreateTupleDescCopy() or advance
2092 : * NextRecordTypmod.
2093 : */
2094 16476 : ensure_record_cache_typmod_slot_exists(NextRecordTypmod);
2095 :
2096 : /* Reference-counted local cache only. */
2097 16476 : entDesc = CreateTupleDescCopy(tupDesc);
2098 16476 : entDesc->tdrefcount = 1;
2099 16476 : entDesc->tdtypmod = NextRecordTypmod++;
2100 : }
2101 : else
2102 : {
2103 74 : ensure_record_cache_typmod_slot_exists(entDesc->tdtypmod);
2104 : }
2105 :
2106 16550 : RecordCacheArray[entDesc->tdtypmod].tupdesc = entDesc;
2107 :
2108 : /* Assign a unique tupdesc identifier, too. */
2109 16550 : RecordCacheArray[entDesc->tdtypmod].id = ++tupledesc_id_counter;
2110 :
2111 : /* Fully initialized; create the hash table entry */
2112 16550 : recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
2113 : &tupDesc,
2114 : HASH_ENTER, NULL);
2115 16550 : recentry->tupdesc = entDesc;
2116 :
2117 : /* Update the caller's tuple descriptor. */
2118 16550 : tupDesc->tdtypmod = entDesc->tdtypmod;
2119 :
2120 16550 : MemoryContextSwitchTo(oldcxt);
2121 : }
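
/*
 * A hedged sketch of how callers typically reach this function: build a
 * transient record tupdesc and "bless" it, which is essentially what
 * BlessTupleDesc() does for functions returning RECORD:
 *
 *		TupleDesc	tupdesc = CreateTemplateTupleDesc(2);
 *
 *		TupleDescInitEntry(tupdesc, (AttrNumber) 1, "a", INT4OID, -1, 0);
 *		TupleDescInitEntry(tupdesc, (AttrNumber) 2, "b", TEXTOID, -1, 0);
 *		assign_record_type_typmod(tupdesc);
 *		// tupdesc->tdtypmod now identifies this rowtype to
 *		// lookup_rowtype_tupdesc(RECORDOID, tupdesc->tdtypmod)
 */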
2122 :
2123 : /*
2124 : * assign_record_type_identifier
2125 : *
2126 : * Get an identifier, which will be unique over the lifespan of this backend
2127 : * process, for the current tuple descriptor of the specified composite type.
2128 : * For named composite types, the value is guaranteed to change if the type's
2129 : * definition does. For registered RECORD types, the value will not change
2130 : * once assigned, since the registered type won't either. If an anonymous
2131 : * RECORD type is specified, we return a new identifier on each call.
2132 : */
2133 : uint64
2134 5496 : assign_record_type_identifier(Oid type_id, int32 typmod)
2135 : {
2136 5496 : if (type_id != RECORDOID)
2137 : {
2138 : /*
2139 : * It's a named composite type, so use the regular typcache.
2140 : */
2141 : TypeCacheEntry *typentry;
2142 :
2143 0 : typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
2144 0 : if (typentry->tupDesc == NULL)
2145 0 : ereport(ERROR,
2146 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2147 : errmsg("type %s is not composite",
2148 : format_type_be(type_id))));
2149 : Assert(typentry->tupDesc_identifier != 0);
2150 0 : return typentry->tupDesc_identifier;
2151 : }
2152 : else
2153 : {
2154 : /*
2155 : * It's a transient record type, so look in our record-type table.
2156 : */
2157 5496 : if (typmod >= 0 && typmod < RecordCacheArrayLen &&
2158 60 : RecordCacheArray[typmod].tupdesc != NULL)
2159 : {
2160 : Assert(RecordCacheArray[typmod].id != 0);
2161 60 : return RecordCacheArray[typmod].id;
2162 : }
2163 :
2164 : /* For anonymous or unrecognized record type, generate a new ID */
2165 5436 : return ++tupledesc_id_counter;
2166 : }
2167 : }
2168 :
2169 : /*
2170 : * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
2171 : * This exists only to avoid exposing private innards of
2172 : * SharedRecordTypmodRegistry in a header.
2173 : */
2174 : size_t
2175 138 : SharedRecordTypmodRegistryEstimate(void)
2176 : {
2177 138 : return sizeof(SharedRecordTypmodRegistry);
2178 : }
2179 :
2180 : /*
2181 : * Initialize 'registry' in a pre-existing shared memory region, which must be
2182 : * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
2183 : * bytes.
2184 : *
2185 : * 'area' will be used to allocate shared memory space as required for the
2186 : * typmod registration. The current process, expected to be a leader process
2187 : * in a parallel query, will be attached automatically and its current record
2188 : * types will be loaded into *registry. While attached, all calls to
2189 : * assign_record_type_typmod will use the shared registry. Worker backends
2190 : * will need to attach explicitly.
2191 : *
2192 : * Note that this function takes 'area' and 'segment' as arguments rather than
2193 : * accessing them via CurrentSession, because they aren't installed there
2194 : * until after this function runs.
2195 : */
2196 : void
2197 138 : SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry,
2198 : dsm_segment *segment,
2199 : dsa_area *area)
2200 : {
2201 : MemoryContext old_context;
2202 : dshash_table *record_table;
2203 : dshash_table *typmod_table;
2204 : int32 typmod;
2205 :
2206 : Assert(!IsParallelWorker());
2207 :
2208 : /* We can't already be attached to a shared registry. */
2209 : Assert(CurrentSession->shared_typmod_registry == NULL);
2210 : Assert(CurrentSession->shared_record_table == NULL);
2211 : Assert(CurrentSession->shared_typmod_table == NULL);
2212 :
2213 138 : old_context = MemoryContextSwitchTo(TopMemoryContext);
2214 :
2215 : /* Create the hash table of tuple descriptors indexed by themselves. */
2216 138 : record_table = dshash_create(area, &srtr_record_table_params, area);
2217 :
2218 : /* Create the hash table of tuple descriptors indexed by typmod. */
2219 138 : typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);
2220 :
2221 138 : MemoryContextSwitchTo(old_context);
2222 :
2223 : /* Initialize the SharedRecordTypmodRegistry. */
2224 138 : registry->record_table_handle = dshash_get_hash_table_handle(record_table);
2225 138 : registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
2226 138 : pg_atomic_init_u32(&registry->next_typmod, NextRecordTypmod);
2227 :
2228 : /*
2229 : * Copy all entries from this backend's private registry into the shared
2230 : * registry.
2231 : */
2232 244 : for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
2233 : {
2234 : SharedTypmodTableEntry *typmod_table_entry;
2235 : SharedRecordTableEntry *record_table_entry;
2236 : SharedRecordTableKey record_table_key;
2237 : dsa_pointer shared_dp;
2238 : TupleDesc tupdesc;
2239 : bool found;
2240 :
2241 106 : tupdesc = RecordCacheArray[typmod].tupdesc;
2242 106 : if (tupdesc == NULL)
2243 0 : continue;
2244 :
2245 : /* Copy the TupleDesc into shared memory. */
2246 106 : shared_dp = share_tupledesc(area, tupdesc, typmod);
2247 :
2248 : /* Insert into the typmod table. */
2249 106 : typmod_table_entry = dshash_find_or_insert(typmod_table,
2250 106 : &tupdesc->tdtypmod,
2251 : &found);
2252 106 : if (found)
2253 0 : elog(ERROR, "cannot create duplicate shared record typmod");
2254 106 : typmod_table_entry->typmod = tupdesc->tdtypmod;
2255 106 : typmod_table_entry->shared_tupdesc = shared_dp;
2256 106 : dshash_release_lock(typmod_table, typmod_table_entry);
2257 :
2258 : /* Insert into the record table. */
2259 106 : record_table_key.shared = false;
2260 106 : record_table_key.u.local_tupdesc = tupdesc;
2261 106 : record_table_entry = dshash_find_or_insert(record_table,
2262 : &record_table_key,
2263 : &found);
2264 106 : if (!found)
2265 : {
2266 106 : record_table_entry->key.shared = true;
2267 106 : record_table_entry->key.u.shared_tupdesc = shared_dp;
2268 : }
2269 106 : dshash_release_lock(record_table, record_table_entry);
2270 : }
2271 :
2272 : /*
2273 : * Set up the global state that will tell assign_record_type_typmod and
2274 : * lookup_rowtype_tupdesc_internal about the shared registry.
2275 : */
2276 138 : CurrentSession->shared_record_table = record_table;
2277 138 : CurrentSession->shared_typmod_table = typmod_table;
2278 138 : CurrentSession->shared_typmod_registry = registry;
2279 :
2280 : /*
2281 : * We install a detach hook in the leader, but only to handle cleanup on
2282 : * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
2283 : * the memory, the leader process will use a shared registry until it
2284 : * exits.
2285 : */
2286 138 : on_dsm_detach(segment, shared_record_typmod_registry_detach, (Datum) 0);
2287 138 : }
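
/*
 * A hedged sketch of the leader-side setup sequence (roughly what
 * GetSessionDsmHandle() in session.c arranges; details simplified):
 *
 *		size_t		size = SharedRecordTypmodRegistryEstimate();
 *
 *		// reserve 'size' maxaligned bytes inside a DSM segment, create a
 *		// dsa_area backed by the same segment, then:
 *		SharedRecordTypmodRegistryInit(registry, segment, area);
 *		// parallel workers later call
 *		// SharedRecordTypmodRegistryAttach(registry) to join in
 */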
2288 :
2289 : /*
2290 : * Attach to 'registry', which must have been initialized already by another
2291 : * backend. Future calls to assign_record_type_typmod and
2292 : * lookup_rowtype_tupdesc_internal will use the shared registry until the
2293 : * current session is detached.
2294 : */
2295 : void
2296 2736 : SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
2297 : {
2298 : MemoryContext old_context;
2299 : dshash_table *record_table;
2300 : dshash_table *typmod_table;
2301 :
2302 : Assert(IsParallelWorker());
2303 :
2304 : /* We can't already be attached to a shared registry. */
2305 : Assert(CurrentSession != NULL);
2306 : Assert(CurrentSession->segment != NULL);
2307 : Assert(CurrentSession->area != NULL);
2308 : Assert(CurrentSession->shared_typmod_registry == NULL);
2309 : Assert(CurrentSession->shared_record_table == NULL);
2310 : Assert(CurrentSession->shared_typmod_table == NULL);
2311 :
2312 : /*
2313 : * We can't already have typmods in our local cache, because they'd clash
2314 : * with those imported by SharedRecordTypmodRegistryInit. This should be
2315 : * a freshly started parallel worker. If we ever support worker
2316 : * recycling, a worker would need to zap its local cache in between
2317 : * servicing different queries, in order to be able to call this and
2318 : * synchronize typmods with a new leader; but that's problematic because
2319 : * we can't be very sure that record-typmod-related state hasn't escaped
2320 : * to anywhere else in the process.
2321 : */
2322 : Assert(NextRecordTypmod == 0);
2323 :
2324 2736 : old_context = MemoryContextSwitchTo(TopMemoryContext);
2325 :
2326 : /* Attach to the two hash tables. */
2327 2736 : record_table = dshash_attach(CurrentSession->area,
2328 : &srtr_record_table_params,
2329 : registry->record_table_handle,
2330 2736 : CurrentSession->area);
2331 2736 : typmod_table = dshash_attach(CurrentSession->area,
2332 : &srtr_typmod_table_params,
2333 : registry->typmod_table_handle,
2334 : NULL);
2335 :
2336 2736 : MemoryContextSwitchTo(old_context);
2337 :
2338 : /*
2339 : * Set up detach hook to run at worker exit. Currently this is the same
2340 : * as the leader's detach hook, but in future they might need to be
2341 : * different.
2342 : */
2343 2736 : on_dsm_detach(CurrentSession->segment,
2344 : shared_record_typmod_registry_detach,
2345 : PointerGetDatum(registry));
2346 :
2347 : /*
2348 : * Set up the session state that will tell assign_record_type_typmod and
2349 : * lookup_rowtype_tupdesc_internal about the shared registry.
2350 : */
2351 2736 : CurrentSession->shared_typmod_registry = registry;
2352 2736 : CurrentSession->shared_record_table = record_table;
2353 2736 : CurrentSession->shared_typmod_table = typmod_table;
2354 2736 : }
2355 :
2356 : /*
2357 : * InvalidateCompositeTypeCacheEntry
2358 : * Invalidate particular TypeCacheEntry on Relcache inval callback
2359 : *
2360 : * Delete the cached tuple descriptor (if any) for the given composite
2361 : * type, and reset whatever info we have cached about the composite type's
2362 : * comparability.
2363 : */
2364 : static void
2365 10088 : InvalidateCompositeTypeCacheEntry(TypeCacheEntry *typentry)
2366 : {
2367 : bool hadTupDescOrOpclass;
2368 :
2369 : Assert(typentry->typtype == TYPTYPE_COMPOSITE &&
2370 : OidIsValid(typentry->typrelid));
2371 :
2372 16964 : hadTupDescOrOpclass = (typentry->tupDesc != NULL) ||
2373 6876 : (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2374 :
2375 : /* Delete tupdesc if we have it */
2376 10088 : if (typentry->tupDesc != NULL)
2377 : {
2378 : /*
2379 : * Release our refcount and free the tupdesc if none remain. We can't
2380 : * use DecrTupleDescRefCount here because this reference is not logged
2381 : * by the current resource owner.
2382 : */
2383 : Assert(typentry->tupDesc->tdrefcount > 0);
2384 3212 : if (--typentry->tupDesc->tdrefcount == 0)
2385 2612 : FreeTupleDesc(typentry->tupDesc);
2386 3212 : typentry->tupDesc = NULL;
2387 :
2388 : /*
2389 : * Also clear tupDesc_identifier, so that anyone watching it will
2390 : * realize that the tupdesc has changed.
2391 : */
2392 3212 : typentry->tupDesc_identifier = 0;
2393 : }
2394 :
2395 : /* Reset equality/comparison/hashing validity information */
2396 10088 : typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2397 :
2398 : /*
2399 : * Call delete_rel_type_cache_if_needed() if we actually cleared
2400 : * something.
2401 : */
2402 10088 : if (hadTupDescOrOpclass)
2403 3212 : delete_rel_type_cache_if_needed(typentry);
2404 10088 : }
2405 :
2406 : /*
2407 : * TypeCacheRelCallback
2408 : * Relcache inval callback function
2409 : *
2410 : * Delete the cached tuple descriptor (if any) for the given rel's composite
2411 : * type, or for all composite types if relid == InvalidOid. Also reset
2412 : * whatever info we have cached about the composite type's comparability.
2413 : *
2414 : * This is called when a relcache invalidation event occurs for the given
2415 : * relid. We can't use syscache to find a type corresponding to the given
2416 : * relation because the code can be called outside of transaction. Thus, we
2417 : * relation because the code can be called outside of a transaction. Thus,
2418 : * we use the RelIdToTypeIdCacheHash map to locate the right typcache entry.
2419 : static void
2420 1969026 : TypeCacheRelCallback(Datum arg, Oid relid)
2421 : {
2422 : TypeCacheEntry *typentry;
2423 :
2424 : /*
2425 : * RelIdToTypeIdCacheHash and TypeCacheHash should exist, otherwise this
2426 : * callback wouldn't be registered
2427 : */
2428 1969026 : if (OidIsValid(relid))
2429 : {
2430 : RelIdToTypeIdCacheEntry *relentry;
2431 :
2432 : /*
2433 : * Find the RelIdToTypeIdCacheHash entry, which should exist as soon as
2434 : * the corresponding typcache entry has something to clean.
2435 : */
2436 1968338 : relentry = (RelIdToTypeIdCacheEntry *) hash_search(RelIdToTypeIdCacheHash,
2437 : &relid,
2438 : HASH_FIND, NULL);
2439 :
2440 1968338 : if (relentry != NULL)
2441 : {
2442 9954 : typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
2443 9954 : &relentry->composite_typid,
2444 : HASH_FIND, NULL);
2445 :
2446 9954 : if (typentry != NULL)
2447 : {
2448 : Assert(typentry->typtype == TYPTYPE_COMPOSITE);
2449 : Assert(relid == typentry->typrelid);
2450 :
2451 9954 : InvalidateCompositeTypeCacheEntry(typentry);
2452 : }
2453 : }
2454 :
2455 : /*
2456 : * Visit all the domain types sequentially. Typically, this shouldn't
2457 : * affect performance, since the population of domain types tends to stay
2458 : * small: domain types are created manually, unlike composite types, which
2459 : * are created automatically for every temporary table.
2460 : */
2461 3743776 : for (typentry = firstDomainTypeEntry;
2462 : typentry != NULL;
2463 1775438 : typentry = typentry->nextDomain)
2464 : {
2465 : /*
2466 : * If it's domain over composite, reset flags. (We don't bother
2467 : * trying to determine whether the specific base type needs a
2468 : * reset.) Note that if we haven't determined whether the base
2469 : * type is composite, we don't need to reset anything.
2470 : */
2471 1775438 : if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
2472 0 : typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2473 : }
2474 : }
2475 : else
2476 : {
2477 : HASH_SEQ_STATUS status;
2478 :
2479 : /*
2480 : * Relid is invalid. By convention, that means we must reset all
2481 : * composite types in the cache. We must also reset flags for domain
2482 : * types; since both require walking the hash, do it in a single scan.
2483 : */
2484 688 : hash_seq_init(&status, TypeCacheHash);
2485 3134 : while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2486 : {
2487 2446 : if (typentry->typtype == TYPTYPE_COMPOSITE)
2488 : {
2489 134 : InvalidateCompositeTypeCacheEntry(typentry);
2490 : }
2491 2312 : else if (typentry->typtype == TYPTYPE_DOMAIN)
2492 : {
2493 : /*
2494 : * If it's domain over composite, reset flags. (We don't
2495 : * bother trying to determine whether the specific base type
2496 : * needs a reset.) Note that if we haven't determined whether
2497 : * the base type is composite, we don't need to reset
2498 : * anything.
2499 : */
2500 34 : if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
2501 0 : typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2502 : }
2503 : }
2504 : }
2505 1969026 : }
2506 :
2507 : /*
2508 : * TypeCacheTypCallback
2509 : * Syscache inval callback function
2510 : *
2511 : * This is called when a syscache invalidation event occurs for any
2512 : * pg_type row. If we have information cached about that type, mark
2513 : * it as needing to be reloaded.
2514 : */
2515 : static void
2516 633288 : TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
2517 : {
2518 : HASH_SEQ_STATUS status;
2519 : TypeCacheEntry *typentry;
2520 :
2521 : /* TypeCacheHash must exist, else this callback wouldn't be registered */
2522 :
2523 : /*
2524 : * By convention, zero hash value is passed to the callback as a sign that
2525 : * it's time to invalidate the whole cache. See sinval.c, inval.c and
2526 : * InvalidateSystemCachesExtended().
2527 : */
2528 633288 : if (hashvalue == 0)
2529 526 : hash_seq_init(&status, TypeCacheHash);
2530 : else
2531 632762 : hash_seq_init_with_hash_value(&status, TypeCacheHash, hashvalue);
2532 :
2533 640184 : while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2534 : {
2535 6896 : bool hadPgTypeData = (typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA);
2536 :
2537 : Assert(hashvalue == 0 || typentry->type_id_hash == hashvalue);
2538 :
2539 : /*
2540 : * Mark the data obtained directly from pg_type as invalid. Also, if
2541 : * it's a domain, typnotnull might've changed, so we'll need to
2542 : * recalculate its constraints.
2543 : */
2544 6896 : typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
2545 : TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS);
2546 :
2547 : /*
2548 : * Call delete_rel_type_cache_if_needed() if we cleaned
2549 : * TCFLAGS_HAVE_PG_TYPE_DATA flag previously.
2550 : */
2551 6896 : if (hadPgTypeData)
2552 3576 : delete_rel_type_cache_if_needed(typentry);
2553 : }
2554 633288 : }
2555 :
2556 : /*
2557 : * TypeCacheOpcCallback
2558 : * Syscache inval callback function
2559 : *
2560 : * This is called when a syscache invalidation event occurs for any pg_opclass
2561 : * row. In principle we could probably just invalidate data dependent on the
2562 : * particular opclass, but since updates on pg_opclass are rare in production
2563 : * it doesn't seem worth a lot of complication: we just mark all cached data
2564 : * invalid.
2565 : *
2566 : * Note that we don't bother watching for updates on pg_amop or pg_amproc.
2567 : * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
2568 : * is not allowed to be used to add/drop the primary operators and functions
2569 : * of an opclass, only cross-type members of a family; and the latter sorts
2570 : * of members are not going to get cached here.
2571 : */
2572 : static void
2573 1534 : TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
2574 : {
2575 : HASH_SEQ_STATUS status;
2576 : TypeCacheEntry *typentry;
2577 :
2578 : /* TypeCacheHash must exist, else this callback wouldn't be registered */
2579 1534 : hash_seq_init(&status, TypeCacheHash);
2580 9252 : while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2581 : {
2582 7718 : bool hadOpclass = (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2583 :
2584 : /* Reset equality/comparison/hashing validity information */
2585 7718 : typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2586 :
2587 : /*
2588 : * Call delete_rel_type_cache_if_needed() if we actually cleared some
2589 : * of TCFLAGS_OPERATOR_FLAGS.
2590 : */
2591 7718 : if (hadOpclass)
2592 1490 : delete_rel_type_cache_if_needed(typentry);
2593 : }
2594 1534 : }
2595 :
2596 : /*
2597 : * TypeCacheConstrCallback
2598 : * Syscache inval callback function
2599 : *
2600 : * This is called when a syscache invalidation event occurs for any
2601 : * pg_constraint row. We flush information about domain constraints
2602 : * when this happens.
2603 : *
2604 : * It's slightly annoying that we can't tell whether the inval event was for
2605 : * a domain constraint record or not; there's usually more update traffic
2606 : * for table constraints than domain constraints, so we'll do a lot of
2607 : * useless flushes. Still, this is better than the old no-caching-at-all
2608 : * approach to domain constraints.
2609 : */
2610 : static void
2611 186068 : TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
2612 : {
2613 : TypeCacheEntry *typentry;
2614 :
2615 : /*
2616 : * Because this is called very frequently, and typically very few of the
2617 : * typcache entries are for domains, we don't use hash_seq_search here.
2618 : * Instead we thread all the domain-type entries together so that we can
2619 : * visit them cheaply.
2620 : */
2621 379198 : for (typentry = firstDomainTypeEntry;
2622 : typentry != NULL;
2623 193130 : typentry = typentry->nextDomain)
2624 : {
2625 : /* Reset domain constraint validity information */
2626 193130 : typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
2627 : }
2628 186068 : }
2629 :
2630 :
2631 : /*
2632 : * Check if given OID is part of the subset that's sortable by comparisons
2633 : */
2634 : static inline bool
2635 303858 : enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
2636 : {
2637 : Oid offset;
2638 :
2639 303858 : if (arg < enumdata->bitmap_base)
2640 0 : return false;
2641 303858 : offset = arg - enumdata->bitmap_base;
2642 303858 : if (offset > (Oid) INT_MAX)
2643 0 : return false;
2644 303858 : return bms_is_member((int) offset, enumdata->sorted_values);
2645 : }
2646 :
2647 :
2648 : /*
2649 : * compare_values_of_enum
2650 : * Compare two members of an enum type.
2651 : * Return <0, 0, or >0 according as arg1 <, =, or > arg2.
2652 : *
2653 : * Note: currently, the enumData cache is refreshed only if we are asked
2654 : * to compare an enum value that is not already in the cache. This is okay
2655 : * because there is no support for re-ordering existing values, so comparisons
2656 : * of previously cached values will return the right answer even if other
2657 : * values have been added since we last loaded the cache.
2658 : *
2659 : * Note: the enum logic has a special-case rule about even-numbered versus
2660 : * odd-numbered OIDs, but we take no account of that rule here; this
2661 : * routine shouldn't even get called when that rule applies.
2662 : */
2663 : int
2664 152426 : compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
2665 : {
2666 : TypeCacheEnumData *enumdata;
2667 : EnumItem *item1;
2668 : EnumItem *item2;
2669 :
2670 : /*
2671 : * Equal OIDs are certainly equal --- this case was probably handled by
2672 : * our caller, but we may as well check.
2673 : */
2674 152426 : if (arg1 == arg2)
2675 0 : return 0;
2676 :
2677 : /* Load up the cache if first time through */
2678 152426 : if (tcache->enumData == NULL)
2679 10 : load_enum_cache_data(tcache);
2680 152426 : enumdata = tcache->enumData;
2681 :
2682 : /*
2683 : * If both OIDs are known-sorted, we can just compare them directly.
2684 : */
2685 303858 : if (enum_known_sorted(enumdata, arg1) &&
2686 151432 : enum_known_sorted(enumdata, arg2))
2687 : {
2688 0 : if (arg1 < arg2)
2689 0 : return -1;
2690 : else
2691 0 : return 1;
2692 : }
2693 :
2694 : /*
2695 : * Slow path: we have to identify their actual sort-order positions.
2696 : */
2697 152426 : item1 = find_enumitem(enumdata, arg1);
2698 152426 : item2 = find_enumitem(enumdata, arg2);
2699 :
2700 152426 : if (item1 == NULL || item2 == NULL)
2701 : {
2702 : /*
2703 : * We couldn't find one or both values. That means the enum has
2704 : * changed under us, so re-initialize the cache and try again. We
2705 : * don't bother retrying the known-sorted case in this path.
2706 : */
2707 0 : load_enum_cache_data(tcache);
2708 0 : enumdata = tcache->enumData;
2709 :
2710 0 : item1 = find_enumitem(enumdata, arg1);
2711 0 : item2 = find_enumitem(enumdata, arg2);
2712 :
2713 : /*
2714 : * If we still can't find the values, complain: we must have corrupt
2715 : * data.
2716 : */
2717 0 : if (item1 == NULL)
2718 0 : elog(ERROR, "enum value %u not found in cache for enum %s",
2719 : arg1, format_type_be(tcache->type_id));
2720 0 : if (item2 == NULL)
2721 0 : elog(ERROR, "enum value %u not found in cache for enum %s",
2722 : arg2, format_type_be(tcache->type_id));
2723 : }
2724 :
2725 152426 : if (item1->sort_order < item2->sort_order)
2726 51536 : return -1;
2727 100890 : else if (item1->sort_order > item2->sort_order)
2728 100890 : return 1;
2729 : else
2730 0 : return 0;
2731 : }
2732 :
2733 : /*
2734 : * Load (or re-load) the enumData member of the typcache entry.
2735 : */
2736 : static void
2737 10 : load_enum_cache_data(TypeCacheEntry *tcache)
2738 : {
2739 : TypeCacheEnumData *enumdata;
2740 : Relation enum_rel;
2741 : SysScanDesc enum_scan;
2742 : HeapTuple enum_tuple;
2743 : ScanKeyData skey;
2744 : EnumItem *items;
2745 : int numitems;
2746 : int maxitems;
2747 : Oid bitmap_base;
2748 : Bitmapset *bitmap;
2749 : MemoryContext oldcxt;
2750 : int bm_size,
2751 : start_pos;
2752 :
2753 : /* Check that this is actually an enum */
2754 10 : if (tcache->typtype != TYPTYPE_ENUM)
2755 0 : ereport(ERROR,
2756 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2757 : errmsg("%s is not an enum",
2758 : format_type_be(tcache->type_id))));
2759 :
2760 : /*
2761 : * Read all the information for members of the enum type. We collect the
2762 : * info in working memory in the caller's context, and then transfer it to
2763 : * permanent memory in CacheMemoryContext. This minimizes the risk of
2764 : * leaking memory from CacheMemoryContext in the event of an error partway
2765 : * through.
2766 : */
2767 10 : maxitems = 64;
2768 10 : items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
2769 10 : numitems = 0;
2770 :
2771 : /* Scan pg_enum for the members of the target enum type. */
2772 10 : ScanKeyInit(&skey,
2773 : Anum_pg_enum_enumtypid,
2774 : BTEqualStrategyNumber, F_OIDEQ,
2775 : ObjectIdGetDatum(tcache->type_id));
2776 :
2777 10 : enum_rel = table_open(EnumRelationId, AccessShareLock);
2778 10 : enum_scan = systable_beginscan(enum_rel,
2779 : EnumTypIdLabelIndexId,
2780 : true, NULL,
2781 : 1, &skey);
2782 :
2783 80 : while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
2784 : {
2785 70 : Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);
2786 :
2787 70 : if (numitems >= maxitems)
2788 : {
2789 0 : maxitems *= 2;
2790 0 : items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
2791 : }
2792 70 : items[numitems].enum_oid = en->oid;
2793 70 : items[numitems].sort_order = en->enumsortorder;
2794 70 : numitems++;
2795 : }
2796 :
2797 10 : systable_endscan(enum_scan);
2798 10 : table_close(enum_rel, AccessShareLock);
2799 :
2800 : /* Sort the items into OID order */
2801 10 : qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
2802 :
2803 : /*
2804 : * Here, we create a bitmap listing a subset of the enum's OIDs that are
2805 : * known to be in order and can thus be compared with just OID comparison.
2806 : *
2807 : * The point of this is that the enum's initial OIDs were certainly in
2808 : * order, so there is some subset that can be compared via OID comparison;
2809 : * and we'd rather not do binary searches unnecessarily.
2810 : *
2811 : * This is somewhat heuristic, and might identify a subset of OIDs that
2812 : * isn't exactly what the type started with. That's okay as long as the
2813 : * subset is correctly sorted.
2814 : */
2815 10 : bitmap_base = InvalidOid;
2816 10 : bitmap = NULL;
2817 10 : bm_size = 1; /* only save sets of at least 2 OIDs */
2818 :
2819 22 : for (start_pos = 0; start_pos < numitems - 1; start_pos++)
2820 : {
2821 : /*
2822 : * Identify longest sorted subsequence starting at start_pos
2823 : */
2824 22 : Bitmapset *this_bitmap = bms_make_singleton(0);
2825 22 : int this_bm_size = 1;
2826 22 : Oid start_oid = items[start_pos].enum_oid;
2827 22 : float4 prev_order = items[start_pos].sort_order;
2828 : int i;
2829 :
2830 148 : for (i = start_pos + 1; i < numitems; i++)
2831 : {
2832 : Oid offset;
2833 :
2834 126 : offset = items[i].enum_oid - start_oid;
2835 : /* quit if bitmap would be too large; cutoff is arbitrary */
2836 126 : if (offset >= 8192)
2837 0 : break;
2838 : /* include the item if it's in-order */
2839 126 : if (items[i].sort_order > prev_order)
2840 : {
2841 68 : prev_order = items[i].sort_order;
2842 68 : this_bitmap = bms_add_member(this_bitmap, (int) offset);
2843 68 : this_bm_size++;
2844 : }
2845 : }
2846 :
2847 : /* Remember it if larger than previous best */
2848 22 : if (this_bm_size > bm_size)
2849 : {
2850 10 : bms_free(bitmap);
2851 10 : bitmap_base = start_oid;
2852 10 : bitmap = this_bitmap;
2853 10 : bm_size = this_bm_size;
2854 : }
2855 : else
2856 12 : bms_free(this_bitmap);
2857 :
2858 : /*
2859 : * Done if it's not possible to find a longer sequence in the rest of
2860 : * the list. In typical cases this will happen on the first
2861 : * iteration, which is why we create the bitmaps on the fly instead of
2862 : * doing a second pass over the list.
2863 : */
2864 22 : if (bm_size >= (numitems - start_pos - 1))
2865 10 : break;
2866 : }
2867 :
2868 : /* OK, copy the data into CacheMemoryContext */
2869 10 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2870 : enumdata = (TypeCacheEnumData *)
2871 10 : palloc(offsetof(TypeCacheEnumData, enum_values) +
2872 10 : numitems * sizeof(EnumItem));
2873 10 : enumdata->bitmap_base = bitmap_base;
2874 10 : enumdata->sorted_values = bms_copy(bitmap);
2875 10 : enumdata->num_values = numitems;
2876 10 : memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
2877 10 : MemoryContextSwitchTo(oldcxt);
2878 :
2879 10 : pfree(items);
2880 10 : bms_free(bitmap);
2881 :
2882 : /* And link the finished cache struct into the typcache */
2883 10 : if (tcache->enumData != NULL)
2884 0 : pfree(tcache->enumData);
2885 10 : tcache->enumData = enumdata;
2886 10 : }
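
/*
 * Worked example of the bitmap heuristic above (hypothetical OIDs and sort
 * orders): suppose the members, sorted by OID, are
 *
 *		OID 17001	sort_order 1.0
 *		OID 17002	sort_order 2.0
 *		OID 17003	sort_order 4.0
 *		OID 17004	sort_order 3.0	(added later via ALTER TYPE ... BEFORE)
 *
 * Starting at 17001 we collect offsets {0, 1, 2}: 17004 is out of order
 * relative to prev_order = 4.0 and is skipped, giving this_bm_size = 3.
 * Since 3 >= numitems - start_pos - 1, the outer loop stops immediately;
 * comparisons among 17001..17003 are then resolved by OID alone, while
 * anything involving 17004 takes the binary-search path.
 */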
2887 :
2888 : /*
2889 : * Locate the EnumItem with the given OID, if present
2890 : */
2891 : static EnumItem *
2892 304852 : find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
2893 : {
2894 : EnumItem srch;
2895 :
2896 : /* On some versions of Solaris, bsearch of zero items dumps core */
2897 304852 : if (enumdata->num_values <= 0)
2898 0 : return NULL;
2899 :
2900 304852 : srch.enum_oid = arg;
2901 304852 : return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2902 : sizeof(EnumItem), enum_oid_cmp);
2903 : }
2904 :
2905 : /*
2906 : * qsort comparison function for OID-ordered EnumItems
2907 : */
2908 : static int
2909 614378 : enum_oid_cmp(const void *left, const void *right)
2910 : {
2911 614378 : const EnumItem *l = (const EnumItem *) left;
2912 614378 : const EnumItem *r = (const EnumItem *) right;
2913 :
2914 614378 : return pg_cmp_u32(l->enum_oid, r->enum_oid);
2915 : }
2916 :
2917 : /*
2918 : * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
2919 : * to the given value and return a dsa_pointer.
2920 : */
2921 : static dsa_pointer
2922 174 : share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
2923 : {
2924 : dsa_pointer shared_dp;
2925 : TupleDesc shared;
2926 :
2927 174 : shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2928 174 : shared = (TupleDesc) dsa_get_address(area, shared_dp);
2929 174 : TupleDescCopy(shared, tupdesc);
2930 174 : shared->tdtypmod = typmod;
2931 :
2932 174 : return shared_dp;
2933 : }
2934 :
2935 : /*
2936 : * If we are attached to a SharedRecordTypmodRegistry, use it to find or
2937 : * create a shared TupleDesc that matches 'tupdesc'. Otherwise return NULL.
2938 : * Tuple descriptors returned by this function are not reference counted, and
2939 : * will exist at least as long as the current backend remained attached to the
2940 : * will exist at least as long as the current backend remains attached to the
2941 : */
2942 : static TupleDesc
2943 16550 : find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
2944 : {
2945 : TupleDesc result;
2946 : SharedRecordTableKey key;
2947 : SharedRecordTableEntry *record_table_entry;
2948 : SharedTypmodTableEntry *typmod_table_entry;
2949 : dsa_pointer shared_dp;
2950 : bool found;
2951 : uint32 typmod;
2952 :
2953 : /* If not even attached, nothing to do. */
2954 16550 : if (CurrentSession->shared_typmod_registry == NULL)
2955 16476 : return NULL;
2956 :
2957 : /* Try to find a matching tuple descriptor in the record table. */
2958 74 : key.shared = false;
2959 74 : key.u.local_tupdesc = tupdesc;
2960 : record_table_entry = (SharedRecordTableEntry *)
2961 74 : dshash_find(CurrentSession->shared_record_table, &key, false);
2962 74 : if (record_table_entry)
2963 : {
2964 : Assert(record_table_entry->key.shared);
2965 6 : dshash_release_lock(CurrentSession->shared_record_table,
2966 : record_table_entry);
2967 : result = (TupleDesc)
2968 6 : dsa_get_address(CurrentSession->area,
2969 : record_table_entry->key.u.shared_tupdesc);
2970 : Assert(result->tdrefcount == -1);
2971 :
2972 6 : return result;
2973 : }
2974 :
2975 : /* Allocate a new typmod number. This will be wasted if we error out. */
2976 68 : typmod = (int)
2977 68 : pg_atomic_fetch_add_u32(&CurrentSession->shared_typmod_registry->next_typmod,
2978 : 1);
2979 :
2980 : /* Copy the TupleDesc into shared memory. */
2981 68 : shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
2982 :
2983 : /*
2984 : * Create an entry in the typmod table so that others will understand this
2985 : * typmod number.
2986 : */
2987 68 : PG_TRY();
2988 : {
2989 : typmod_table_entry = (SharedTypmodTableEntry *)
2990 68 : dshash_find_or_insert(CurrentSession->shared_typmod_table,
2991 : &typmod, &found);
2992 68 : if (found)
2993 0 : elog(ERROR, "cannot create duplicate shared record typmod");
2994 : }
2995 0 : PG_CATCH();
2996 : {
2997 0 : dsa_free(CurrentSession->area, shared_dp);
2998 0 : PG_RE_THROW();
2999 : }
3000 68 : PG_END_TRY();
3001 68 : typmod_table_entry->typmod = typmod;
3002 68 : typmod_table_entry->shared_tupdesc = shared_dp;
3003 68 : dshash_release_lock(CurrentSession->shared_typmod_table,
3004 : typmod_table_entry);
3005 :
3006 : /*
3007 : * Finally create an entry in the record table so others with matching
3008 : * tuple descriptors can reuse the typmod.
3009 : */
3010 : record_table_entry = (SharedRecordTableEntry *)
3011 68 : dshash_find_or_insert(CurrentSession->shared_record_table, &key,
3012 : &found);
3013 68 : if (found)
3014 : {
3015 : /*
3016 : * Someone concurrently inserted a matching tuple descriptor since the
3017 : * first time we checked. Use that one instead.
3018 : */
3019 0 : dshash_release_lock(CurrentSession->shared_record_table,
3020 : record_table_entry);
3021 :
3022 : /* Might as well free up the space used by the one we created. */
3023 0 : found = dshash_delete_key(CurrentSession->shared_typmod_table,
3024 : &typmod);
3025 : Assert(found);
3026 0 : dsa_free(CurrentSession->area, shared_dp);
3027 :
3028 : /* Return the one we found. */
3029 : Assert(record_table_entry->key.shared);
3030 : result = (TupleDesc)
3031 0 : dsa_get_address(CurrentSession->area,
3032 : record_table_entry->key.u.shared_tupdesc);
3033 : Assert(result->tdrefcount == -1);
3034 :
3035 0 : return result;
3036 : }
3037 :
3038 : /* Store it and return it. */
3039 68 : record_table_entry->key.shared = true;
3040 68 : record_table_entry->key.u.shared_tupdesc = shared_dp;
3041 68 : dshash_release_lock(CurrentSession->shared_record_table,
3042 : record_table_entry);
3043 : result = (TupleDesc)
3044 68 : dsa_get_address(CurrentSession->area, shared_dp);
3045 : Assert(result->tdrefcount == -1);
3046 :
3047 68 : return result;
3048 : }
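 :
 : /*
 : * A minimal sketch of the reverse mapping a parallel worker performs with
 : * the typmod allocated above: given a RECORD typmod, fetch the shared
 : * TupleDesc through shared_typmod_table. The function name is made up
 : * for illustration; the dshash/dsa calls mirror the ones used above.
 : */
 : static TupleDesc
 : sketch_find_shared_tupledesc(uint32 typmod)
 : {
 :     SharedTypmodTableEntry *entry;
 :     TupleDesc   tupdesc = NULL;
 :
 :     entry = (SharedTypmodTableEntry *)
 :         dshash_find(CurrentSession->shared_typmod_table, &typmod, false);
 :     if (entry != NULL)
 :     {
 :         /* Translate the dsa_pointer into a backend-local address. */
 :         tupdesc = (TupleDesc)
 :             dsa_get_address(CurrentSession->area, entry->shared_tupdesc);
 :         dshash_release_lock(CurrentSession->shared_typmod_table, entry);
 :     }
 :     return tupdesc;
 : }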
3049 :
3050 : /*
3051 : * On-DSM-detach hook to forget about the current shared record typmod
3052 : * infrastructure. This is currently used by both leader and workers.
3053 : */
3054 : static void
3055 2874 : shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
3056 : {
3057 : /* Be cautious here: maybe we didn't finish initializing. */
3058 2874 : if (CurrentSession->shared_record_table != NULL)
3059 : {
3060 2874 : dshash_detach(CurrentSession->shared_record_table);
3061 2874 : CurrentSession->shared_record_table = NULL;
3062 : }
3063 2874 : if (CurrentSession->shared_typmod_table != NULL)
3064 : {
3065 2874 : dshash_detach(CurrentSession->shared_typmod_table);
3066 2874 : CurrentSession->shared_typmod_table = NULL;
3067 : }
3068 2874 : CurrentSession->shared_typmod_registry = NULL;
3069 2874 : }
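 :
 : /*
 : * For context, a hedged sketch of how a callback with this signature is
 : * wired up; the real registration happens in the registry init/attach
 : * code elsewhere in this file. Here "segment" stands for the session's
 : * DSM segment, and the Datum argument is unused by the callback above.
 : */
 : on_dsm_detach(segment, shared_record_typmod_registry_detach, (Datum) 0);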
3070 :
3071 : /*
3072 : * Insert RelIdToTypeIdCacheHash entry if needed.
3073 : */
3074 : static void
3075 744232 : insert_rel_type_cache_if_needed(TypeCacheEntry *typentry)
3076 : {
3077 : /* Immediately quit for non-composite types */
3078 744232 : if (typentry->typtype != TYPTYPE_COMPOSITE)
3079 659304 : return;
3080 :
3081 : /* typrelid should be given for composite types */
3082 : Assert(OidIsValid(typentry->typrelid));
3083 :
3084 : /*
3085 : * Insert a RelIdToTypeIdCacheHash entry if the typentry has any
3086 : * information indicating it should be here.
3087 : */
3088 84928 : if ((typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) ||
3089 0 : (typentry->flags & TCFLAGS_OPERATOR_FLAGS) ||
3090 0 : typentry->tupDesc != NULL)
3091 : {
3092 : RelIdToTypeIdCacheEntry *relentry;
3093 : bool found;
3094 :
3095 84928 : relentry = (RelIdToTypeIdCacheEntry *) hash_search(RelIdToTypeIdCacheHash,
3096 84928 : &typentry->typrelid,
3097 : HASH_ENTER, &found);
3098 84928 : relentry->relid = typentry->typrelid;
3099 84928 : relentry->composite_typid = typentry->type_id;
3100 : }
3101 : }
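 :
 : /*
 : * Illustrative sketch (hypothetical helper, not part of this file): the
 : * mapping maintained above lets invalidation code go from a relation OID
 : * straight to the composite type's TypeCacheEntry without scanning the
 : * whole type cache.
 : */
 : static TypeCacheEntry *
 : sketch_typentry_for_rel(Oid relid)
 : {
 :     RelIdToTypeIdCacheEntry *relentry;
 :
 :     /* First hop: relation OID -> composite type OID. */
 :     relentry = (RelIdToTypeIdCacheEntry *)
 :         hash_search(RelIdToTypeIdCacheHash, &relid, HASH_FIND, NULL);
 :     if (relentry == NULL)
 :         return NULL;
 :
 :     /* Second hop: composite type OID -> TypeCacheEntry, if cached. */
 :     return (TypeCacheEntry *) hash_search(TypeCacheHash,
 :                                           &relentry->composite_typid,
 :                                           HASH_FIND, NULL);
 : }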
3102 :
3103 : /*
3104 : * Delete the RelIdToTypeIdCacheHash entry if needed after resetting the
3105 : * TCFLAGS_HAVE_PG_TYPE_DATA flag, any of the TCFLAGS_OPERATOR_FLAGS bits,
3106 : * or tupDesc.
3107 : */
3108 : static void
3109 8278 : delete_rel_type_cache_if_needed(TypeCacheEntry *typentry)
3110 : {
3111 : #ifdef USE_ASSERT_CHECKING
3112 : int i;
3113 : bool is_in_progress = false;
3114 :
3115 : for (i = 0; i < in_progress_list_len; i++)
3116 : {
3117 : if (in_progress_list[i] == typentry->type_id)
3118 : {
3119 : is_in_progress = true;
3120 : break;
3121 : }
3122 : }
3123 : #endif
3124 :
3125 : /* Immediately quit for non-composite types */
3126 8278 : if (typentry->typtype != TYPTYPE_COMPOSITE)
3127 2902 : return;
3128 :
3129 : /* typrelid should be given for composite types */
3130 : Assert(OidIsValid(typentry->typrelid));
3131 :
3132 : /*
3133 : * Delete a RelIdToTypeIdCacheHash entry if the typentry doesn't have any
3134 : * information indicating the entry should still be there.
3135 : */
3136 5376 : if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) &&
3137 2908 : !(typentry->flags & TCFLAGS_OPERATOR_FLAGS) &&
3138 2824 : typentry->tupDesc == NULL)
3139 : {
3140 : bool found;
3141 :
3142 2158 : (void) hash_search(RelIdToTypeIdCacheHash,
3143 2158 : &typentry->typrelid,
3144 : HASH_REMOVE, &found);
3145 : Assert(found || is_in_progress);
3146 : }
3147 : else
3148 : {
3149 : #ifdef USE_ASSERT_CHECKING
3150 : /*
3151 : * Otherwise, in assert-enabled builds, verify that the expected
3152 : * RelIdToTypeIdCacheHash entry exists.
3153 : */
3154 : bool found;
3155 :
3156 : if (!is_in_progress)
3157 : {
3158 : (void) hash_search(RelIdToTypeIdCacheHash,
3159 : &typentry->typrelid,
3160 : HASH_FIND, &found);
3161 : Assert(found);
3162 : }
3163 : #endif
3164 : }
3165 : }
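 :
 : /*
 : * A minimal sketch of the intended call pattern (the real call sites are
 : * the cache-reset paths in this file; tupDesc refcount handling is
 : * omitted): clear the state that justified the mapping, then let
 : * delete_rel_type_cache_if_needed() drop the entry if nothing remains.
 : */
 : static void
 : sketch_reset_composite_entry(TypeCacheEntry *typentry)
 : {
 :     typentry->tupDesc = NULL;   /* assume refcount already released */
 :     typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA | TCFLAGS_OPERATOR_FLAGS);
 :     delete_rel_type_cache_if_needed(typentry);
 : }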
3166 :
3167 : /*
3168 : * Add possibly-missing RelIdToTypeIdCacheHash entries for TypeCacheHash
3169 : * entries marked as in-progress by lookup_type_cache(). Entries can be
3170 : * left in-progress if lookup_type_cache() errors out or is interrupted.
3171 : */
3172 : static void
3173 871240 : finalize_in_progress_typentries(void)
3174 : {
3175 : int i;
3176 :
3177 871242 : for (i = 0; i < in_progress_list_len; i++)
3178 : {
3179 : TypeCacheEntry *typentry;
3180 :
3181 2 : typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
3182 2 : &in_progress_list[i],
3183 : HASH_FIND, NULL);
3184 2 : if (typentry)
3185 2 : insert_rel_type_cache_if_needed(typentry);
3186 : }
3187 :
3188 871240 : in_progress_list_len = 0;
3189 871240 : }
3190 :
3191 : void
3192 851206 : AtEOXact_TypeCache(void)
3193 : {
3194 851206 : finalize_in_progress_typentries();
3195 851206 : }
3196 :
3197 : void
3198 20034 : AtEOSubXact_TypeCache(void)
3199 : {
3200 20034 : finalize_in_progress_typentries();
3201 20034 : }
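 :
 : /*
 : * A hedged sketch of where these hooks are expected to fire (the actual
 : * call sites live in the transaction machinery, outside this file),
 : * shown only to make the cleanup flow concrete: both top-level and
 : * subtransaction end give the type cache a chance to repair in-progress
 : * entries.
 : */
 : static void
 : sketch_end_of_xact_cleanup(bool is_subxact)
 : {
 :     /* ... other per-subsystem end-of-transaction cleanup ... */
 :     if (is_subxact)
 :         AtEOSubXact_TypeCache();
 :     else
 :         AtEOXact_TypeCache();
 : }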
|