Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * typcache.c
4 : * POSTGRES type cache code
5 : *
6 : * The type cache exists to speed lookup of certain information about data
7 : * types that is not directly available from a type's pg_type row. For
8 : * example, we use a type's default btree opclass, or the default hash
9 : * opclass if no btree opclass exists, to determine which operators should
10 : * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
11 : *
12 : * Several seemingly-odd choices have been made to support use of the type
13 : * cache by generic array and record handling routines, such as array_eq(),
14 : * record_cmp(), and hash_array(). Because those routines are used as index
15 : * support operations, they cannot leak memory. To allow them to execute
16 : * efficiently, all information that they would like to re-use across calls
17 : * is kept in the type cache.
18 : *
19 : * Once created, a type cache entry lives as long as the backend does, so
20 : * there is no need for a call to release a cache entry. If the type is
21 : * dropped, the cache entry simply becomes wasted storage. This is not
22 : * expected to happen often, and assuming that typcache entries are good
23 : * permanently allows caching pointers to them in long-lived places.
24 : *
25 : * We have some provisions for updating cache entries if the stored data
26 : * becomes obsolete. Core data extracted from the pg_type row is updated
27 : * when we detect updates to pg_type. Information dependent on opclasses is
28 : * cleared if we detect updates to pg_opclass. We also support clearing the
29 : * tuple descriptor and operator/function parts of a rowtype's cache entry,
30 : * since those may need to change as a consequence of ALTER TABLE. Domain
31 : * constraint changes are also tracked properly.
32 : *
33 : *
34 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
35 : * Portions Copyright (c) 1994, Regents of the University of California
36 : *
37 : * IDENTIFICATION
38 : * src/backend/utils/cache/typcache.c
39 : *
40 : *-------------------------------------------------------------------------
41 : */
42 : #include "postgres.h"
43 :
44 : #include <limits.h>
45 :
46 : #include "access/hash.h"
47 : #include "access/htup_details.h"
48 : #include "access/nbtree.h"
49 : #include "access/parallel.h"
50 : #include "access/relation.h"
51 : #include "access/session.h"
52 : #include "access/table.h"
53 : #include "catalog/pg_am.h"
54 : #include "catalog/pg_constraint.h"
55 : #include "catalog/pg_enum.h"
56 : #include "catalog/pg_operator.h"
57 : #include "catalog/pg_range.h"
58 : #include "catalog/pg_type.h"
59 : #include "commands/defrem.h"
60 : #include "common/int.h"
61 : #include "executor/executor.h"
62 : #include "lib/dshash.h"
63 : #include "optimizer/optimizer.h"
64 : #include "port/pg_bitutils.h"
65 : #include "storage/lwlock.h"
66 : #include "utils/builtins.h"
67 : #include "utils/catcache.h"
68 : #include "utils/fmgroids.h"
69 : #include "utils/injection_point.h"
70 : #include "utils/inval.h"
71 : #include "utils/lsyscache.h"
72 : #include "utils/memutils.h"
73 : #include "utils/rel.h"
74 : #include "utils/syscache.h"
75 : #include "utils/typcache.h"
76 :
77 :
78 : /* The main type cache hashtable searched by lookup_type_cache */
79 : static HTAB *TypeCacheHash = NULL;
80 :
81 : /*
82 : * The mapping of relation's OID to the corresponding composite type OID.
83 : * We're keeping the map entry when the corresponding typentry has something
84 : * to clear i.e it has either TCFLAGS_HAVE_PG_TYPE_DATA, or
85 : * TCFLAGS_OPERATOR_FLAGS, or tupdesc.
86 : */
87 : static HTAB *RelIdToTypeIdCacheHash = NULL;
88 :
/* Hash entry for RelIdToTypeIdCacheHash; keyed by relation OID. */
typedef struct RelIdToTypeIdCacheEntry
{
	Oid			relid;			/* OID of the relation (hash key) */
	Oid			composite_typid;	/* OID of the relation's composite type */
} RelIdToTypeIdCacheEntry;
94 :
95 : /* List of type cache entries for domain types */
96 : static TypeCacheEntry *firstDomainTypeEntry = NULL;
97 :
98 : /* Private flag bits in the TypeCacheEntry.flags field */
99 : #define TCFLAGS_HAVE_PG_TYPE_DATA 0x000001
100 : #define TCFLAGS_CHECKED_BTREE_OPCLASS 0x000002
101 : #define TCFLAGS_CHECKED_HASH_OPCLASS 0x000004
102 : #define TCFLAGS_CHECKED_EQ_OPR 0x000008
103 : #define TCFLAGS_CHECKED_LT_OPR 0x000010
104 : #define TCFLAGS_CHECKED_GT_OPR 0x000020
105 : #define TCFLAGS_CHECKED_CMP_PROC 0x000040
106 : #define TCFLAGS_CHECKED_HASH_PROC 0x000080
107 : #define TCFLAGS_CHECKED_HASH_EXTENDED_PROC 0x000100
108 : #define TCFLAGS_CHECKED_ELEM_PROPERTIES 0x000200
109 : #define TCFLAGS_HAVE_ELEM_EQUALITY 0x000400
110 : #define TCFLAGS_HAVE_ELEM_COMPARE 0x000800
111 : #define TCFLAGS_HAVE_ELEM_HASHING 0x001000
112 : #define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING 0x002000
113 : #define TCFLAGS_CHECKED_FIELD_PROPERTIES 0x004000
114 : #define TCFLAGS_HAVE_FIELD_EQUALITY 0x008000
115 : #define TCFLAGS_HAVE_FIELD_COMPARE 0x010000
116 : #define TCFLAGS_HAVE_FIELD_HASHING 0x020000
117 : #define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING 0x040000
118 : #define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS 0x080000
119 : #define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE 0x100000
120 :
121 : /* The flags associated with equality/comparison/hashing are all but these: */
122 : #define TCFLAGS_OPERATOR_FLAGS \
123 : (~(TCFLAGS_HAVE_PG_TYPE_DATA | \
124 : TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
125 : TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))
126 :
127 : /*
128 : * Data stored about a domain type's constraints. Note that we do not create
129 : * this struct for the common case of a constraint-less domain; we just set
130 : * domainData to NULL to indicate that.
131 : *
132 : * Within a DomainConstraintCache, we store expression plan trees, but the
133 : * check_exprstate fields of the DomainConstraintState nodes are just NULL.
134 : * When needed, expression evaluation nodes are built by flat-copying the
135 : * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
136 : * Such a node tree is not part of the DomainConstraintCache, but is
137 : * considered to belong to a DomainConstraintRef.
138 : */
struct DomainConstraintCache
{
	List	   *constraints;	/* list of DomainConstraintState nodes */
	MemoryContext dccContext;	/* memory context holding all associated data */
	long		dccRefCount;	/* number of references to this struct;
								 * released via decr_dcc_refcount() */
};
145 :
146 : /* Private information to support comparisons of enum values */
typedef struct
{
	Oid			enum_oid;		/* OID of one enum value (lookup key; see
								 * find_enumitem() and enum_oid_cmp()) */
	float4		sort_order;		/* its sort position */
} EnumItem;
152 :
typedef struct TypeCacheEnumData
{
	Oid			bitmap_base;	/* OID corresponding to bit 0 of bitmapset */
	Bitmapset  *sorted_values;	/* Set of OIDs known to be in order */
	int			num_values;		/* total number of values in enum */
	/* one entry per enum value; presumably kept sorted by enum_oid for
	 * binary search via find_enumitem() — confirm against loader */
	EnumItem	enum_values[FLEXIBLE_ARRAY_MEMBER];
} TypeCacheEnumData;
160 :
161 : /*
162 : * We use a separate table for storing the definitions of non-anonymous
163 : * record types. Once defined, a record type will be remembered for the
164 : * life of the backend. Subsequent uses of the "same" record type (where
165 : * sameness means equalRowTypes) will refer to the existing table entry.
166 : *
167 : * Stored record types are remembered in a linear array of TupleDescs,
168 : * which can be indexed quickly with the assigned typmod. There is also
169 : * a hash table to speed searches for matching TupleDescs.
170 : */
171 :
/*
 * Hash entry for RecordCacheHash.  The TupleDesc pointer is the sole
 * payload; presumably it also serves as the hash key (row-type equality) —
 * confirm against the table's creation parameters.
 */
typedef struct RecordCacheEntry
{
	TupleDesc	tupdesc;
} RecordCacheEntry;
176 :
177 : /*
178 : * To deal with non-anonymous record types that are exchanged by backends
179 : * involved in a parallel query, we also need a shared version of the above.
180 : */
struct SharedRecordTypmodRegistry
{
	/* A hash table for finding a matching TupleDesc. */
	dshash_table_handle record_table_handle;
	/* A hash table for finding a TupleDesc by typmod. */
	dshash_table_handle typmod_table_handle;
	/* A source of new record typmod numbers (shared atomic counter). */
	pg_atomic_uint32 next_typmod;
};
190 :
191 : /*
192 : * When using shared tuple descriptors as hash table keys we need a way to be
193 : * able to search for an equal shared TupleDesc using a backend-local
194 : * TupleDesc. So we use this type which can hold either, and hash and compare
195 : * functions that know how to handle both.
196 : */
typedef struct SharedRecordTableKey
{
	union
	{
		TupleDesc	local_tupdesc;	/* valid when !shared */
		dsa_pointer shared_tupdesc; /* valid when shared */
	}			u;
	bool		shared;			/* selects which union member is valid */
} SharedRecordTableKey;
206 :
207 : /*
208 : * The shared version of RecordCacheEntry. This lets us look up a typmod
209 : * using a TupleDesc which may be in local or shared memory.
210 : */
typedef struct SharedRecordTableEntry
{
	/* The key is the whole payload; it carries the shared TupleDesc pointer. */
	SharedRecordTableKey key;
} SharedRecordTableEntry;
215 :
216 : /*
217 : * An entry in SharedRecordTypmodRegistry's typmod table. This lets us look
218 : * up a TupleDesc in shared memory using a typmod.
219 : */
typedef struct SharedTypmodTableEntry
{
	uint32		typmod;			/* assigned typmod (hash key) */
	dsa_pointer shared_tupdesc; /* DSA pointer to the shared TupleDesc */
} SharedTypmodTableEntry;
225 :
/*
 * Stack of type OIDs for which lookup_type_cache() is currently executing
 * (each call pushes its type_id; presumably popped again before return —
 * confirm in the function's exit path).  Lets invalidation callbacks detect
 * changes to a type whose cache entry is still being built.
 */
static Oid *in_progress_list;
static int	in_progress_list_len;
static int	in_progress_list_maxlen;
229 :
230 : /*
231 : * A comparator function for SharedRecordTableKey.
232 : */
233 : static int
234 120 : shared_record_table_compare(const void *a, const void *b, size_t size,
235 : void *arg)
236 : {
237 120 : dsa_area *area = (dsa_area *) arg;
238 120 : SharedRecordTableKey *k1 = (SharedRecordTableKey *) a;
239 120 : SharedRecordTableKey *k2 = (SharedRecordTableKey *) b;
240 : TupleDesc t1;
241 : TupleDesc t2;
242 :
243 120 : if (k1->shared)
244 0 : t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
245 : else
246 120 : t1 = k1->u.local_tupdesc;
247 :
248 120 : if (k2->shared)
249 120 : t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
250 : else
251 0 : t2 = k2->u.local_tupdesc;
252 :
253 120 : return equalRowTypes(t1, t2) ? 0 : 1;
254 : }
255 :
256 : /*
257 : * A hash function for SharedRecordTableKey.
258 : */
259 : static uint32
260 248 : shared_record_table_hash(const void *a, size_t size, void *arg)
261 : {
262 248 : dsa_area *area = (dsa_area *) arg;
263 248 : SharedRecordTableKey *k = (SharedRecordTableKey *) a;
264 : TupleDesc t;
265 :
266 248 : if (k->shared)
267 0 : t = (TupleDesc) dsa_get_address(area, k->u.shared_tupdesc);
268 : else
269 248 : t = k->u.local_tupdesc;
270 :
271 248 : return hashRowType(t);
272 : }
273 :
/* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
static const dshash_parameters srtr_record_table_params = {
	sizeof(SharedRecordTableKey),	/* unused */
	sizeof(SharedRecordTableEntry),
	shared_record_table_compare,	/* custom compare: row-type equality */
	shared_record_table_hash,	/* custom hash: row-type hash */
	dshash_memcpy,
	LWTRANCHE_PER_SESSION_RECORD_TYPE
};
283 :
/* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
static const dshash_parameters srtr_typmod_table_params = {
	sizeof(uint32),				/* key is the typmod itself */
	sizeof(SharedTypmodTableEntry),
	dshash_memcmp,				/* bitwise key comparison */
	dshash_memhash,				/* bitwise key hash */
	dshash_memcpy,
	LWTRANCHE_PER_SESSION_RECORD_TYPMOD
};
293 :
294 : /* hashtable for recognizing registered record types */
295 : static HTAB *RecordCacheHash = NULL;
296 :
typedef struct RecordCacheArrayEntry
{
	uint64		id;				/* unique tupledesc identifier; presumably
								 * drawn from tupledesc_id_counter — confirm */
	TupleDesc	tupdesc;		/* the registered record type's descriptor */
} RecordCacheArrayEntry;
302 :
303 : /* array of info about registered record types, indexed by assigned typmod */
304 : static RecordCacheArrayEntry *RecordCacheArray = NULL;
305 : static int32 RecordCacheArrayLen = 0; /* allocated length of above array */
306 : static int32 NextRecordTypmod = 0; /* number of entries used */
307 :
308 : /*
309 : * Process-wide counter for generating unique tupledesc identifiers.
310 : * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
311 : * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
312 : */
313 : static uint64 tupledesc_id_counter = INVALID_TUPLEDESC_IDENTIFIER;
314 :
315 : static void load_typcache_tupdesc(TypeCacheEntry *typentry);
316 : static void load_rangetype_info(TypeCacheEntry *typentry);
317 : static void load_multirangetype_info(TypeCacheEntry *typentry);
318 : static void load_domaintype_info(TypeCacheEntry *typentry);
319 : static int dcs_cmp(const void *a, const void *b);
320 : static void decr_dcc_refcount(DomainConstraintCache *dcc);
321 : static void dccref_deletion_callback(void *arg);
322 : static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
323 : static bool array_element_has_equality(TypeCacheEntry *typentry);
324 : static bool array_element_has_compare(TypeCacheEntry *typentry);
325 : static bool array_element_has_hashing(TypeCacheEntry *typentry);
326 : static bool array_element_has_extended_hashing(TypeCacheEntry *typentry);
327 : static void cache_array_element_properties(TypeCacheEntry *typentry);
328 : static bool record_fields_have_equality(TypeCacheEntry *typentry);
329 : static bool record_fields_have_compare(TypeCacheEntry *typentry);
330 : static bool record_fields_have_hashing(TypeCacheEntry *typentry);
331 : static bool record_fields_have_extended_hashing(TypeCacheEntry *typentry);
332 : static void cache_record_field_properties(TypeCacheEntry *typentry);
333 : static bool range_element_has_hashing(TypeCacheEntry *typentry);
334 : static bool range_element_has_extended_hashing(TypeCacheEntry *typentry);
335 : static void cache_range_element_properties(TypeCacheEntry *typentry);
336 : static bool multirange_element_has_hashing(TypeCacheEntry *typentry);
337 : static bool multirange_element_has_extended_hashing(TypeCacheEntry *typentry);
338 : static void cache_multirange_element_properties(TypeCacheEntry *typentry);
339 : static void TypeCacheRelCallback(Datum arg, Oid relid);
340 : static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue);
341 : static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
342 : static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
343 : static void load_enum_cache_data(TypeCacheEntry *tcache);
344 : static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
345 : static int enum_oid_cmp(const void *left, const void *right);
346 : static void shared_record_typmod_registry_detach(dsm_segment *segment,
347 : Datum datum);
348 : static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc);
349 : static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
350 : uint32 typmod);
351 : static void insert_rel_type_cache_if_needed(TypeCacheEntry *typentry);
352 : static void delete_rel_type_cache_if_needed(TypeCacheEntry *typentry);
353 :
354 :
355 : /*
356 : * Hash function compatible with one-arg system cache hash function.
357 : */
358 : static uint32
359 815202 : type_cache_syshash(const void *key, Size keysize)
360 : {
361 : Assert(keysize == sizeof(Oid));
362 815202 : return GetSysCacheHashValue1(TYPEOID, ObjectIdGetDatum(*(const Oid *) key));
363 : }
364 :
365 : /*
366 : * lookup_type_cache
367 : *
368 : * Fetch the type cache entry for the specified datatype, and make sure that
369 : * all the fields requested by bits in 'flags' are valid.
370 : *
371 : * The result is never NULL --- we will ereport() if the passed type OID is
372 : * invalid. Note however that we may fail to find one or more of the
373 : * values requested by 'flags'; the caller needs to check whether the fields
374 : * are InvalidOid or not.
375 : *
376 : * Note that while filling TypeCacheEntry we might process concurrent
377 : * invalidation messages, causing our not-yet-filled TypeCacheEntry to be
378 : * invalidated. In this case, we typically only clear flags while values are
379 : * still available for the caller. It's expected that the caller holds
380 : * enough locks on type-depending objects that the values are still relevant.
381 : * It's also important that the tupdesc is filled after all other
382 : * TypeCacheEntry items for TYPTYPE_COMPOSITE. So, tupdesc can't get
383 : * invalidated during the lookup_type_cache() call.
384 : */
385 : TypeCacheEntry *
386 733932 : lookup_type_cache(Oid type_id, int flags)
387 : {
388 : TypeCacheEntry *typentry;
389 : bool found;
390 : int in_progress_offset;
391 :
392 733932 : if (TypeCacheHash == NULL)
393 : {
394 : /* First time through: initialize the hash table */
395 : HASHCTL ctl;
396 : int allocsize;
397 :
398 8278 : ctl.keysize = sizeof(Oid);
399 8278 : ctl.entrysize = sizeof(TypeCacheEntry);
400 :
401 : /*
402 : * TypeCacheEntry takes hash value from the system cache. For
403 : * TypeCacheHash we use the same hash in order to speedup search by
404 : * hash value. This is used by hash_seq_init_with_hash_value().
405 : */
406 8278 : ctl.hash = type_cache_syshash;
407 :
408 8278 : TypeCacheHash = hash_create("Type information cache", 64,
409 : &ctl, HASH_ELEM | HASH_FUNCTION);
410 :
411 : Assert(RelIdToTypeIdCacheHash == NULL);
412 :
413 8278 : ctl.keysize = sizeof(Oid);
414 8278 : ctl.entrysize = sizeof(RelIdToTypeIdCacheEntry);
415 8278 : RelIdToTypeIdCacheHash = hash_create("Map from relid to OID of cached composite type", 64,
416 : &ctl, HASH_ELEM | HASH_BLOBS);
417 :
418 : /* Also set up callbacks for SI invalidations */
419 8278 : CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
420 8278 : CacheRegisterSyscacheCallback(TYPEOID, TypeCacheTypCallback, (Datum) 0);
421 8278 : CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
422 8278 : CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);
423 :
424 : /* Also make sure CacheMemoryContext exists */
425 8278 : if (!CacheMemoryContext)
426 0 : CreateCacheMemoryContext();
427 :
428 : /*
429 : * reserve enough in_progress_list slots for many cases
430 : */
431 8278 : allocsize = 4;
432 8278 : in_progress_list =
433 8278 : MemoryContextAlloc(CacheMemoryContext,
434 : allocsize * sizeof(*in_progress_list));
435 8278 : in_progress_list_maxlen = allocsize;
436 : }
437 :
438 : Assert(TypeCacheHash != NULL && RelIdToTypeIdCacheHash != NULL);
439 :
440 : /* Register to catch invalidation messages */
441 733932 : if (in_progress_list_len >= in_progress_list_maxlen)
442 : {
443 : int allocsize;
444 :
445 0 : allocsize = in_progress_list_maxlen * 2;
446 0 : in_progress_list = repalloc(in_progress_list,
447 : allocsize * sizeof(*in_progress_list));
448 0 : in_progress_list_maxlen = allocsize;
449 : }
450 733932 : in_progress_offset = in_progress_list_len++;
451 733932 : in_progress_list[in_progress_offset] = type_id;
452 :
453 : /* Try to look up an existing entry */
454 733932 : typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
455 : &type_id,
456 : HASH_FIND, NULL);
457 733932 : if (typentry == NULL)
458 : {
459 : /*
460 : * If we didn't find one, we want to make one. But first look up the
461 : * pg_type row, just to make sure we don't make a cache entry for an
462 : * invalid type OID. If the type OID is not valid, present a
463 : * user-facing error, since some code paths such as domain_in() allow
464 : * this function to be reached with a user-supplied OID.
465 : */
466 : HeapTuple tp;
467 : Form_pg_type typtup;
468 :
469 35450 : tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
470 35450 : if (!HeapTupleIsValid(tp))
471 0 : ereport(ERROR,
472 : (errcode(ERRCODE_UNDEFINED_OBJECT),
473 : errmsg("type with OID %u does not exist", type_id)));
474 35450 : typtup = (Form_pg_type) GETSTRUCT(tp);
475 35450 : if (!typtup->typisdefined)
476 0 : ereport(ERROR,
477 : (errcode(ERRCODE_UNDEFINED_OBJECT),
478 : errmsg("type \"%s\" is only a shell",
479 : NameStr(typtup->typname))));
480 :
481 : /* Now make the typcache entry */
482 35450 : typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
483 : &type_id,
484 : HASH_ENTER, &found);
485 : Assert(!found); /* it wasn't there a moment ago */
486 :
487 2233350 : MemSet(typentry, 0, sizeof(TypeCacheEntry));
488 :
489 : /* These fields can never change, by definition */
490 35450 : typentry->type_id = type_id;
491 35450 : typentry->type_id_hash = get_hash_value(TypeCacheHash, &type_id);
492 :
493 : /* Keep this part in sync with the code below */
494 35450 : typentry->typlen = typtup->typlen;
495 35450 : typentry->typbyval = typtup->typbyval;
496 35450 : typentry->typalign = typtup->typalign;
497 35450 : typentry->typstorage = typtup->typstorage;
498 35450 : typentry->typtype = typtup->typtype;
499 35450 : typentry->typrelid = typtup->typrelid;
500 35450 : typentry->typsubscript = typtup->typsubscript;
501 35450 : typentry->typelem = typtup->typelem;
502 35450 : typentry->typarray = typtup->typarray;
503 35450 : typentry->typcollation = typtup->typcollation;
504 35450 : typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
505 :
506 : /* If it's a domain, immediately thread it into the domain cache list */
507 35450 : if (typentry->typtype == TYPTYPE_DOMAIN)
508 : {
509 1602 : typentry->nextDomain = firstDomainTypeEntry;
510 1602 : firstDomainTypeEntry = typentry;
511 : }
512 :
513 35450 : ReleaseSysCache(tp);
514 : }
515 698482 : else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
516 : {
517 : /*
518 : * We have an entry, but its pg_type row got changed, so reload the
519 : * data obtained directly from pg_type.
520 : */
521 : HeapTuple tp;
522 : Form_pg_type typtup;
523 :
524 584 : tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
525 584 : if (!HeapTupleIsValid(tp))
526 0 : ereport(ERROR,
527 : (errcode(ERRCODE_UNDEFINED_OBJECT),
528 : errmsg("type with OID %u does not exist", type_id)));
529 584 : typtup = (Form_pg_type) GETSTRUCT(tp);
530 584 : if (!typtup->typisdefined)
531 0 : ereport(ERROR,
532 : (errcode(ERRCODE_UNDEFINED_OBJECT),
533 : errmsg("type \"%s\" is only a shell",
534 : NameStr(typtup->typname))));
535 :
536 : /*
537 : * Keep this part in sync with the code above. Many of these fields
538 : * shouldn't ever change, particularly typtype, but copy 'em anyway.
539 : */
540 584 : typentry->typlen = typtup->typlen;
541 584 : typentry->typbyval = typtup->typbyval;
542 584 : typentry->typalign = typtup->typalign;
543 584 : typentry->typstorage = typtup->typstorage;
544 584 : typentry->typtype = typtup->typtype;
545 584 : typentry->typrelid = typtup->typrelid;
546 584 : typentry->typsubscript = typtup->typsubscript;
547 584 : typentry->typelem = typtup->typelem;
548 584 : typentry->typarray = typtup->typarray;
549 584 : typentry->typcollation = typtup->typcollation;
550 584 : typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
551 :
552 584 : ReleaseSysCache(tp);
553 : }
554 :
555 : /*
556 : * Look up opclasses if we haven't already and any dependent info is
557 : * requested.
558 : */
559 733932 : if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR | TYPECACHE_GT_OPR |
560 : TYPECACHE_CMP_PROC |
561 : TYPECACHE_EQ_OPR_FINFO | TYPECACHE_CMP_PROC_FINFO |
562 479198 : TYPECACHE_BTREE_OPFAMILY)) &&
563 479198 : !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
564 : {
565 : Oid opclass;
566 :
567 30894 : opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
568 30894 : if (OidIsValid(opclass))
569 : {
570 29904 : typentry->btree_opf = get_opclass_family(opclass);
571 29904 : typentry->btree_opintype = get_opclass_input_type(opclass);
572 : }
573 : else
574 : {
575 990 : typentry->btree_opf = typentry->btree_opintype = InvalidOid;
576 : }
577 :
578 : /*
579 : * Reset information derived from btree opclass. Note in particular
580 : * that we'll redetermine the eq_opr even if we previously found one;
581 : * this matters in case a btree opclass has been added to a type that
582 : * previously had only a hash opclass.
583 : */
584 30894 : typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
585 : TCFLAGS_CHECKED_LT_OPR |
586 : TCFLAGS_CHECKED_GT_OPR |
587 : TCFLAGS_CHECKED_CMP_PROC);
588 30894 : typentry->flags |= TCFLAGS_CHECKED_BTREE_OPCLASS;
589 : }
590 :
591 : /*
592 : * If we need to look up equality operator, and there's no btree opclass,
593 : * force lookup of hash opclass.
594 : */
595 733932 : if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
596 453222 : !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
597 30622 : typentry->btree_opf == InvalidOid)
598 978 : flags |= TYPECACHE_HASH_OPFAMILY;
599 :
600 733932 : if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO |
601 : TYPECACHE_HASH_EXTENDED_PROC |
602 : TYPECACHE_HASH_EXTENDED_PROC_FINFO |
603 308426 : TYPECACHE_HASH_OPFAMILY)) &&
604 308426 : !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
605 : {
606 : Oid opclass;
607 :
608 22646 : opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
609 22646 : if (OidIsValid(opclass))
610 : {
611 22388 : typentry->hash_opf = get_opclass_family(opclass);
612 22388 : typentry->hash_opintype = get_opclass_input_type(opclass);
613 : }
614 : else
615 : {
616 258 : typentry->hash_opf = typentry->hash_opintype = InvalidOid;
617 : }
618 :
619 : /*
620 : * Reset information derived from hash opclass. We do *not* reset the
621 : * eq_opr; if we already found one from the btree opclass, that
622 : * decision is still good.
623 : */
624 22646 : typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
625 : TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
626 22646 : typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
627 : }
628 :
629 : /*
630 : * Look for requested operators and functions, if we haven't already.
631 : */
632 733932 : if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
633 453222 : !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
634 : {
635 30622 : Oid eq_opr = InvalidOid;
636 :
637 30622 : if (typentry->btree_opf != InvalidOid)
638 29644 : eq_opr = get_opfamily_member(typentry->btree_opf,
639 : typentry->btree_opintype,
640 : typentry->btree_opintype,
641 : BTEqualStrategyNumber);
642 30622 : if (eq_opr == InvalidOid &&
643 978 : typentry->hash_opf != InvalidOid)
644 784 : eq_opr = get_opfamily_member(typentry->hash_opf,
645 : typentry->hash_opintype,
646 : typentry->hash_opintype,
647 : HTEqualStrategyNumber);
648 :
649 : /*
650 : * If the proposed equality operator is array_eq or record_eq, check
651 : * to see if the element type or column types support equality. If
652 : * not, array_eq or record_eq would fail at runtime, so we don't want
653 : * to report that the type has equality. (We can omit similar
654 : * checking for ranges and multiranges because ranges can't be created
655 : * in the first place unless their subtypes support equality.)
656 : */
657 30622 : if (eq_opr == ARRAY_EQ_OP &&
658 2928 : !array_element_has_equality(typentry))
659 378 : eq_opr = InvalidOid;
660 30244 : else if (eq_opr == RECORD_EQ_OP &&
661 438 : !record_fields_have_equality(typentry))
662 194 : eq_opr = InvalidOid;
663 :
664 : /* Force update of eq_opr_finfo only if we're changing state */
665 30622 : if (typentry->eq_opr != eq_opr)
666 28596 : typentry->eq_opr_finfo.fn_oid = InvalidOid;
667 :
668 30622 : typentry->eq_opr = eq_opr;
669 :
670 : /*
671 : * Reset info about hash functions whenever we pick up new info about
672 : * equality operator. This is so we can ensure that the hash
673 : * functions match the operator.
674 : */
675 30622 : typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
676 : TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
677 30622 : typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
678 : }
679 733932 : if ((flags & TYPECACHE_LT_OPR) &&
680 286412 : !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
681 : {
682 18104 : Oid lt_opr = InvalidOid;
683 :
684 18104 : if (typentry->btree_opf != InvalidOid)
685 17686 : lt_opr = get_opfamily_member(typentry->btree_opf,
686 : typentry->btree_opintype,
687 : typentry->btree_opintype,
688 : BTLessStrategyNumber);
689 :
690 : /*
691 : * As above, make sure array_cmp or record_cmp will succeed; but again
692 : * we need no special check for ranges or multiranges.
693 : */
694 18104 : if (lt_opr == ARRAY_LT_OP &&
695 2322 : !array_element_has_compare(typentry))
696 562 : lt_opr = InvalidOid;
697 17542 : else if (lt_opr == RECORD_LT_OP &&
698 132 : !record_fields_have_compare(typentry))
699 12 : lt_opr = InvalidOid;
700 :
701 18104 : typentry->lt_opr = lt_opr;
702 18104 : typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
703 : }
704 733932 : if ((flags & TYPECACHE_GT_OPR) &&
705 279690 : !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
706 : {
707 17958 : Oid gt_opr = InvalidOid;
708 :
709 17958 : if (typentry->btree_opf != InvalidOid)
710 17560 : gt_opr = get_opfamily_member(typentry->btree_opf,
711 : typentry->btree_opintype,
712 : typentry->btree_opintype,
713 : BTGreaterStrategyNumber);
714 :
715 : /*
716 : * As above, make sure array_cmp or record_cmp will succeed; but again
717 : * we need no special check for ranges or multiranges.
718 : */
719 17958 : if (gt_opr == ARRAY_GT_OP &&
720 2312 : !array_element_has_compare(typentry))
721 562 : gt_opr = InvalidOid;
722 17396 : else if (gt_opr == RECORD_GT_OP &&
723 132 : !record_fields_have_compare(typentry))
724 12 : gt_opr = InvalidOid;
725 :
726 17958 : typentry->gt_opr = gt_opr;
727 17958 : typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
728 : }
729 733932 : if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
730 26374 : !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
731 : {
732 4270 : Oid cmp_proc = InvalidOid;
733 :
734 4270 : if (typentry->btree_opf != InvalidOid)
735 4054 : cmp_proc = get_opfamily_proc(typentry->btree_opf,
736 : typentry->btree_opintype,
737 : typentry->btree_opintype,
738 : BTORDER_PROC);
739 :
740 : /*
741 : * As above, make sure array_cmp or record_cmp will succeed; but again
742 : * we need no special check for ranges or multiranges.
743 : */
744 4270 : if (cmp_proc == F_BTARRAYCMP &&
745 806 : !array_element_has_compare(typentry))
746 176 : cmp_proc = InvalidOid;
747 4094 : else if (cmp_proc == F_BTRECORDCMP &&
748 250 : !record_fields_have_compare(typentry))
749 176 : cmp_proc = InvalidOid;
750 :
751 : /* Force update of cmp_proc_finfo only if we're changing state */
752 4270 : if (typentry->cmp_proc != cmp_proc)
753 3648 : typentry->cmp_proc_finfo.fn_oid = InvalidOid;
754 :
755 4270 : typentry->cmp_proc = cmp_proc;
756 4270 : typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
757 : }
758 733932 : if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) &&
759 307648 : !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
760 : {
761 22376 : Oid hash_proc = InvalidOid;
762 :
763 : /*
764 : * We insist that the eq_opr, if one has been determined, match the
765 : * hash opclass; else report there is no hash function.
766 : */
767 22376 : if (typentry->hash_opf != InvalidOid &&
768 43514 : (!OidIsValid(typentry->eq_opr) ||
769 21310 : typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
770 : typentry->hash_opintype,
771 : typentry->hash_opintype,
772 : HTEqualStrategyNumber)))
773 22204 : hash_proc = get_opfamily_proc(typentry->hash_opf,
774 : typentry->hash_opintype,
775 : typentry->hash_opintype,
776 : HASHSTANDARD_PROC);
777 :
778 : /*
779 : * As above, make sure hash_array, hash_record, or hash_range will
780 : * succeed.
781 : */
782 22376 : if (hash_proc == F_HASH_ARRAY &&
783 1892 : !array_element_has_hashing(typentry))
784 194 : hash_proc = InvalidOid;
785 22182 : else if (hash_proc == F_HASH_RECORD &&
786 422 : !record_fields_have_hashing(typentry))
787 232 : hash_proc = InvalidOid;
788 21950 : else if (hash_proc == F_HASH_RANGE &&
789 120 : !range_element_has_hashing(typentry))
790 6 : hash_proc = InvalidOid;
791 :
792 : /*
793 : * Likewise for hash_multirange.
794 : */
795 22376 : if (hash_proc == F_HASH_MULTIRANGE &&
796 18 : !multirange_element_has_hashing(typentry))
797 6 : hash_proc = InvalidOid;
798 :
799 : /* Force update of hash_proc_finfo only if we're changing state */
800 22376 : if (typentry->hash_proc != hash_proc)
801 20288 : typentry->hash_proc_finfo.fn_oid = InvalidOid;
802 :
803 22376 : typentry->hash_proc = hash_proc;
804 22376 : typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
805 : }
806 733932 : if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
807 8376 : TYPECACHE_HASH_EXTENDED_PROC_FINFO)) &&
808 8376 : !(typentry->flags & TCFLAGS_CHECKED_HASH_EXTENDED_PROC))
809 : {
810 3622 : Oid hash_extended_proc = InvalidOid;
811 :
812 : /*
813 : * We insist that the eq_opr, if one has been determined, match the
814 : * hash opclass; else report there is no hash function.
815 : */
816 3622 : if (typentry->hash_opf != InvalidOid &&
817 6682 : (!OidIsValid(typentry->eq_opr) ||
818 3100 : typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
819 : typentry->hash_opintype,
820 : typentry->hash_opintype,
821 : HTEqualStrategyNumber)))
822 3582 : hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
823 : typentry->hash_opintype,
824 : typentry->hash_opintype,
825 : HASHEXTENDED_PROC);
826 :
827 : /*
828 : * As above, make sure hash_array_extended, hash_record_extended, or
829 : * hash_range_extended will succeed.
830 : */
831 3622 : if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
832 364 : !array_element_has_extended_hashing(typentry))
833 176 : hash_extended_proc = InvalidOid;
834 3446 : else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
835 190 : !record_fields_have_extended_hashing(typentry))
836 182 : hash_extended_proc = InvalidOid;
837 3264 : else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
838 0 : !range_element_has_extended_hashing(typentry))
839 0 : hash_extended_proc = InvalidOid;
840 :
841 : /*
842 : * Likewise for hash_multirange_extended.
843 : */
844 3622 : if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
845 0 : !multirange_element_has_extended_hashing(typentry))
846 0 : hash_extended_proc = InvalidOid;
847 :
848 : /* Force update of proc finfo only if we're changing state */
849 3622 : if (typentry->hash_extended_proc != hash_extended_proc)
850 3200 : typentry->hash_extended_proc_finfo.fn_oid = InvalidOid;
851 :
852 3622 : typentry->hash_extended_proc = hash_extended_proc;
853 3622 : typentry->flags |= TCFLAGS_CHECKED_HASH_EXTENDED_PROC;
854 : }
855 :
856 : /*
857 : * Set up fmgr lookup info as requested
858 : *
859 : * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
860 : * which is not quite right (they're really in the hash table's private
861 : * memory context) but this will do for our purposes.
862 : *
863 : * Note: the code above avoids invalidating the finfo structs unless the
864 : * referenced operator/function OID actually changes. This is to prevent
865 : * unnecessary leakage of any subsidiary data attached to an finfo, since
866 : * that would cause session-lifespan memory leaks.
867 : */
868 733932 : if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
869 5420 : typentry->eq_opr_finfo.fn_oid == InvalidOid &&
870 1654 : typentry->eq_opr != InvalidOid)
871 : {
872 : Oid eq_opr_func;
873 :
874 1648 : eq_opr_func = get_opcode(typentry->eq_opr);
875 1648 : if (eq_opr_func != InvalidOid)
876 1648 : fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
877 : CacheMemoryContext);
878 : }
879 733932 : if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
880 15190 : typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
881 4142 : typentry->cmp_proc != InvalidOid)
882 : {
883 1530 : fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
884 : CacheMemoryContext);
885 : }
886 733932 : if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
887 8230 : typentry->hash_proc_finfo.fn_oid == InvalidOid &&
888 1660 : typentry->hash_proc != InvalidOid)
889 : {
890 1460 : fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
891 : CacheMemoryContext);
892 : }
893 733932 : if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) &&
894 114 : typentry->hash_extended_proc_finfo.fn_oid == InvalidOid &&
895 36 : typentry->hash_extended_proc != InvalidOid)
896 : {
897 24 : fmgr_info_cxt(typentry->hash_extended_proc,
898 : &typentry->hash_extended_proc_finfo,
899 : CacheMemoryContext);
900 : }
901 :
902 : /*
903 : * If it's a composite type (row type), get tupdesc if requested
904 : */
905 733932 : if ((flags & TYPECACHE_TUPDESC) &&
906 84120 : typentry->tupDesc == NULL &&
907 3918 : typentry->typtype == TYPTYPE_COMPOSITE)
908 : {
909 3792 : load_typcache_tupdesc(typentry);
910 : }
911 :
912 : /*
913 : * If requested, get information about a range type
914 : *
915 : * This includes making sure that the basic info about the range element
916 : * type is up-to-date.
917 : */
918 733932 : if ((flags & TYPECACHE_RANGE_INFO) &&
919 28718 : typentry->typtype == TYPTYPE_RANGE)
920 : {
921 28718 : if (typentry->rngelemtype == NULL)
922 756 : load_rangetype_info(typentry);
923 27962 : else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
924 2 : (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
925 : }
926 :
927 : /*
928 : * If requested, get information about a multirange type
929 : */
930 733932 : if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
931 11818 : typentry->rngtype == NULL &&
932 200 : typentry->typtype == TYPTYPE_MULTIRANGE)
933 : {
934 200 : load_multirangetype_info(typentry);
935 : }
936 :
937 : /*
938 : * If requested, get information about a domain type
939 : */
940 733932 : if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
941 8464 : typentry->domainBaseType == InvalidOid &&
942 5540 : typentry->typtype == TYPTYPE_DOMAIN)
943 : {
944 480 : typentry->domainBaseTypmod = -1;
945 480 : typentry->domainBaseType =
946 480 : getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
947 : }
948 733932 : if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
949 42986 : (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
950 4960 : typentry->typtype == TYPTYPE_DOMAIN)
951 : {
952 2618 : load_domaintype_info(typentry);
953 : }
954 :
955 733932 : INJECTION_POINT("typecache-before-rel-type-cache-insert", NULL);
956 :
957 : Assert(in_progress_offset + 1 == in_progress_list_len);
958 733930 : in_progress_list_len--;
959 :
960 733930 : insert_rel_type_cache_if_needed(typentry);
961 :
962 733930 : return typentry;
963 : }
964 :
/*
 * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
 *
 * On success, typentry->tupDesc points at the relcache tuple descriptor of
 * the composite type's underlying relation (with its refcount bumped), and
 * typentry->tupDesc_identifier is advanced to a fresh nonzero value.
 */
static void
load_typcache_tupdesc(TypeCacheEntry *typentry)
{
	Relation	rel;

	if (!OidIsValid(typentry->typrelid))	/* should not happen */
		elog(ERROR, "invalid typrelid for composite type %u",
			 typentry->type_id);
	rel = relation_open(typentry->typrelid, AccessShareLock);
	Assert(rel->rd_rel->reltype == typentry->type_id);

	/*
	 * Link to the tupdesc and increment its refcount (we assert it's a
	 * refcounted descriptor).  We don't use IncrTupleDescRefCount() for this,
	 * because the reference mustn't be entered in the current resource owner;
	 * it can outlive the current query.
	 */
	typentry->tupDesc = RelationGetDescr(rel);

	Assert(typentry->tupDesc->tdrefcount > 0);
	typentry->tupDesc->tdrefcount++;

	/*
	 * In future, we could take some pains to not change tupDesc_identifier if
	 * the tupdesc didn't really change; but for now it's not worth it.
	 */
	typentry->tupDesc_identifier = ++tupledesc_id_counter;

	relation_close(rel, AccessShareLock);
}
998 :
/*
 * load_rangetype_info --- helper routine to set up range type information
 *
 * Fills the rng_* fields of the typcache entry from the type's pg_range row
 * and its subtype opclass.  Setting rngelemtype last is what marks the range
 * data in the entry as valid.
 */
static void
load_rangetype_info(TypeCacheEntry *typentry)
{
	Form_pg_range pg_range;
	HeapTuple	tup;
	Oid			subtypeOid;
	Oid			opclassOid;
	Oid			canonicalOid;
	Oid			subdiffOid;
	Oid			opfamilyOid;
	Oid			opcintype;
	Oid			cmpFnOid;

	/* get information from pg_range */
	tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id));
	/* should not fail, since we already checked typtype ... */
	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for range type %u",
			 typentry->type_id);
	pg_range = (Form_pg_range) GETSTRUCT(tup);

	subtypeOid = pg_range->rngsubtype;
	typentry->rng_collation = pg_range->rngcollation;
	opclassOid = pg_range->rngsubopc;
	canonicalOid = pg_range->rngcanonical;
	subdiffOid = pg_range->rngsubdiff;

	ReleaseSysCache(tup);

	/* get opclass properties and look up the comparison function */
	opfamilyOid = get_opclass_family(opclassOid);
	opcintype = get_opclass_input_type(opclassOid);
	typentry->rng_opfamily = opfamilyOid;

	cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
								 BTORDER_PROC);
	if (!RegProcedureIsValid(cmpFnOid))
		elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
			 BTORDER_PROC, opcintype, opcintype, opfamilyOid);

	/* set up cached fmgrinfo structs (long-lived, so in CacheMemoryContext) */
	fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
				  CacheMemoryContext);
	/* canonical and subdiff support functions are optional for a range type */
	if (OidIsValid(canonicalOid))
		fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
					  CacheMemoryContext);
	if (OidIsValid(subdiffOid))
		fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
					  CacheMemoryContext);

	/* Lastly, set up link to the element type --- this marks data valid */
	typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
}
1055 :
1056 : /*
1057 : * load_multirangetype_info --- helper routine to set up multirange type
1058 : * information
1059 : */
1060 : static void
1061 200 : load_multirangetype_info(TypeCacheEntry *typentry)
1062 : {
1063 : Oid rangetypeOid;
1064 :
1065 200 : rangetypeOid = get_multirange_range(typentry->type_id);
1066 200 : if (!OidIsValid(rangetypeOid))
1067 0 : elog(ERROR, "cache lookup failed for multirange type %u",
1068 : typentry->type_id);
1069 :
1070 200 : typentry->rngtype = lookup_type_cache(rangetypeOid, TYPECACHE_RANGE_INFO);
1071 200 : }
1072 :
/*
 * load_domaintype_info --- helper routine to set up domain constraint info
 *
 * Collects CHECK constraints from this domain and all ancestor domains into
 * a DomainConstraintCache, ordered so that ancestors' constraints run first,
 * plus at most one NOT NULL check (applied before everything else).
 *
 * Note: we assume we're called in a relatively short-lived context, so it's
 * okay to leak data into the current context while scanning pg_constraint.
 * We build the new DomainConstraintCache data in a context underneath
 * CurrentMemoryContext, and reparent it under CacheMemoryContext when
 * complete.
 */
static void
load_domaintype_info(TypeCacheEntry *typentry)
{
	Oid			typeOid = typentry->type_id;
	DomainConstraintCache *dcc;
	bool		notNull = false;
	DomainConstraintState **ccons;
	int			cconslen;
	Relation	conRel;
	MemoryContext oldcxt;

	/*
	 * If we're here, any existing constraint info is stale, so release it.
	 * For safety, be sure to null the link before trying to delete the data.
	 */
	if (typentry->domainData)
	{
		dcc = typentry->domainData;
		typentry->domainData = NULL;
		decr_dcc_refcount(dcc);
	}

	/*
	 * We try to optimize the common case of no domain constraints, so don't
	 * create the dcc object and context until we find a constraint.  Likewise
	 * for the temp sorting array.
	 */
	dcc = NULL;
	ccons = NULL;
	cconslen = 0;

	/*
	 * Scan pg_constraint for relevant constraints.  We want to find
	 * constraints for not just this domain, but any ancestor domains, so the
	 * outer loop crawls up the domain stack.
	 */
	conRel = table_open(ConstraintRelationId, AccessShareLock);

	for (;;)
	{
		HeapTuple	tup;
		HeapTuple	conTup;
		Form_pg_type typTup;
		int			nccons = 0; /* per-domain count; resets each iteration */
		ScanKeyData key[1];
		SysScanDesc scan;

		tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
		if (!HeapTupleIsValid(tup))
			elog(ERROR, "cache lookup failed for type %u", typeOid);
		typTup = (Form_pg_type) GETSTRUCT(tup);

		if (typTup->typtype != TYPTYPE_DOMAIN)
		{
			/* Not a domain, so done */
			ReleaseSysCache(tup);
			break;
		}

		/* Test for NOT NULL Constraint */
		if (typTup->typnotnull)
			notNull = true;

		/* Look for CHECK Constraints on this domain */
		ScanKeyInit(&key[0],
					Anum_pg_constraint_contypid,
					BTEqualStrategyNumber, F_OIDEQ,
					ObjectIdGetDatum(typeOid));

		scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
								  NULL, 1, key);

		while (HeapTupleIsValid(conTup = systable_getnext(scan)))
		{
			Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
			Datum		val;
			bool		isNull;
			char	   *constring;
			Expr	   *check_expr;
			DomainConstraintState *r;

			/* Ignore non-CHECK constraints */
			if (c->contype != CONSTRAINT_CHECK)
				continue;

			/* Not expecting conbin to be NULL, but we'll test for it anyway */
			val = fastgetattr(conTup, Anum_pg_constraint_conbin,
							  conRel->rd_att, &isNull);
			if (isNull)
				elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
					 NameStr(typTup->typname), NameStr(c->conname));

			/* Create the DomainConstraintCache object and context if needed */
			if (dcc == NULL)
			{
				MemoryContext cxt;

				cxt = AllocSetContextCreate(CurrentMemoryContext,
											"Domain constraints",
											ALLOCSET_SMALL_SIZES);
				dcc = (DomainConstraintCache *)
					MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
				dcc->constraints = NIL;
				dcc->dccContext = cxt;
				dcc->dccRefCount = 0;
			}

			/* Convert conbin to a node tree, still in caller's context */
			constring = TextDatumGetCString(val);
			check_expr = (Expr *) stringToNode(constring);

			/*
			 * Plan the expression, since ExecInitExpr will expect that.
			 *
			 * Note: caching the result of expression_planner() is not very
			 * good practice.  Ideally we'd use a CachedExpression here so
			 * that we would react promptly to, eg, changes in inlined
			 * functions.  However, because we don't support mutable domain
			 * CHECK constraints, it's not really clear that it's worth the
			 * extra overhead to do that.
			 */
			check_expr = expression_planner(check_expr);

			/* Create only the minimally needed stuff in dccContext */
			oldcxt = MemoryContextSwitchTo(dcc->dccContext);

			r = makeNode(DomainConstraintState);
			r->constrainttype = DOM_CONSTRAINT_CHECK;
			r->name = pstrdup(NameStr(c->conname));
			r->check_expr = copyObject(check_expr);
			r->check_exprstate = NULL;

			MemoryContextSwitchTo(oldcxt);

			/* Accumulate constraints in an array, for sorting below */
			if (ccons == NULL)
			{
				cconslen = 8;
				ccons = (DomainConstraintState **)
					palloc(cconslen * sizeof(DomainConstraintState *));
			}
			else if (nccons >= cconslen)
			{
				cconslen *= 2;
				ccons = (DomainConstraintState **)
					repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
			}
			ccons[nccons++] = r;
		}

		systable_endscan(scan);

		if (nccons > 0)
		{
			/*
			 * Sort the items for this domain, so that CHECKs are applied in a
			 * deterministic order.
			 */
			if (nccons > 1)
				qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);

			/*
			 * Now attach them to the overall list.  Use lcons() here because
			 * constraints of parent domains should be applied earlier.
			 */
			oldcxt = MemoryContextSwitchTo(dcc->dccContext);
			while (nccons > 0)
				dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
			MemoryContextSwitchTo(oldcxt);
		}

		/* loop to next domain in stack */
		typeOid = typTup->typbasetype;
		ReleaseSysCache(tup);
	}

	table_close(conRel, AccessShareLock);

	/*
	 * Only need to add one NOT NULL check regardless of how many domains in
	 * the stack request it.
	 */
	if (notNull)
	{
		DomainConstraintState *r;

		/* Create the DomainConstraintCache object and context if needed */
		if (dcc == NULL)
		{
			MemoryContext cxt;

			cxt = AllocSetContextCreate(CurrentMemoryContext,
										"Domain constraints",
										ALLOCSET_SMALL_SIZES);
			dcc = (DomainConstraintCache *)
				MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
			dcc->constraints = NIL;
			dcc->dccContext = cxt;
			dcc->dccRefCount = 0;
		}

		/* Create node trees in DomainConstraintCache's context */
		oldcxt = MemoryContextSwitchTo(dcc->dccContext);

		r = makeNode(DomainConstraintState);

		r->constrainttype = DOM_CONSTRAINT_NOTNULL;
		r->name = pstrdup("NOT NULL");
		r->check_expr = NULL;
		r->check_exprstate = NULL;

		/* lcons to apply the nullness check FIRST */
		dcc->constraints = lcons(r, dcc->constraints);

		MemoryContextSwitchTo(oldcxt);
	}

	/*
	 * If we made a constraint object, move it into CacheMemoryContext and
	 * attach it to the typcache entry.
	 */
	if (dcc)
	{
		MemoryContextSetParent(dcc->dccContext, CacheMemoryContext);
		typentry->domainData = dcc;
		dcc->dccRefCount++;		/* count the typcache's reference */
	}

	/* Either way, the typcache entry's domain data is now valid. */
	typentry->flags |= TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
}
1313 :
1314 : /*
1315 : * qsort comparator to sort DomainConstraintState pointers by name
1316 : */
1317 : static int
1318 10 : dcs_cmp(const void *a, const void *b)
1319 : {
1320 10 : const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1321 10 : const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1322 :
1323 10 : return strcmp((*ca)->name, (*cb)->name);
1324 : }
1325 :
1326 : /*
1327 : * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
1328 : * and free it if no references remain
1329 : */
1330 : static void
1331 12886 : decr_dcc_refcount(DomainConstraintCache *dcc)
1332 : {
1333 : Assert(dcc->dccRefCount > 0);
1334 12886 : if (--(dcc->dccRefCount) <= 0)
1335 688 : MemoryContextDelete(dcc->dccContext);
1336 12886 : }
1337 :
1338 : /*
1339 : * Context reset/delete callback for a DomainConstraintRef
1340 : */
1341 : static void
1342 12884 : dccref_deletion_callback(void *arg)
1343 : {
1344 12884 : DomainConstraintRef *ref = (DomainConstraintRef *) arg;
1345 12884 : DomainConstraintCache *dcc = ref->dcc;
1346 :
1347 : /* Paranoia --- be sure link is nulled before trying to release */
1348 12884 : if (dcc)
1349 : {
1350 12194 : ref->constraints = NIL;
1351 12194 : ref->dcc = NULL;
1352 12194 : decr_dcc_refcount(dcc);
1353 : }
1354 12884 : }
1355 :
1356 : /*
1357 : * prep_domain_constraints --- prepare domain constraints for execution
1358 : *
1359 : * The expression trees stored in the DomainConstraintCache's list are
1360 : * converted to executable expression state trees stored in execctx.
1361 : */
1362 : static List *
1363 2554 : prep_domain_constraints(List *constraints, MemoryContext execctx)
1364 : {
1365 2554 : List *result = NIL;
1366 : MemoryContext oldcxt;
1367 : ListCell *lc;
1368 :
1369 2554 : oldcxt = MemoryContextSwitchTo(execctx);
1370 :
1371 5132 : foreach(lc, constraints)
1372 : {
1373 2578 : DomainConstraintState *r = (DomainConstraintState *) lfirst(lc);
1374 : DomainConstraintState *newr;
1375 :
1376 2578 : newr = makeNode(DomainConstraintState);
1377 2578 : newr->constrainttype = r->constrainttype;
1378 2578 : newr->name = r->name;
1379 2578 : newr->check_expr = r->check_expr;
1380 2578 : newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1381 :
1382 2578 : result = lappend(result, newr);
1383 : }
1384 :
1385 2554 : MemoryContextSwitchTo(oldcxt);
1386 :
1387 2554 : return result;
1388 : }
1389 :
/*
 * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
 *
 * Caller must tell us the MemoryContext in which the DomainConstraintRef
 * lives.  The ref will be cleaned up when that context is reset/deleted.
 *
 * Caller must also tell us whether it wants check_exprstate fields to be
 * computed in the DomainConstraintState nodes attached to this ref.
 * If it doesn't, we need not make a copy of the DomainConstraintState list.
 */
void
InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref,
						MemoryContext refctx, bool need_exprstate)
{
	/* Look up the typcache entry --- we assume it survives indefinitely */
	ref->tcache = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
	ref->need_exprstate = need_exprstate;
	/* For safety, establish the callback before acquiring a refcount */
	ref->refctx = refctx;
	ref->dcc = NULL;
	ref->callback.func = dccref_deletion_callback;
	ref->callback.arg = ref;
	MemoryContextRegisterResetCallback(refctx, &ref->callback);
	/* Acquire refcount if there are constraints, and set up exported list */
	if (ref->tcache->domainData)
	{
		ref->dcc = ref->tcache->domainData;
		ref->dcc->dccRefCount++;
		if (ref->need_exprstate)
			/* build executable copies of the constraints in refctx */
			ref->constraints = prep_domain_constraints(ref->dcc->constraints,
													   ref->refctx);
		else
			/* no exprstates wanted, so just share the cached list */
			ref->constraints = ref->dcc->constraints;
	}
	else
		ref->constraints = NIL;
}
1427 :
/*
 * UpdateDomainConstraintRef --- recheck validity of domain constraint info
 *
 * If the domain's constraint set changed, ref->constraints is updated to
 * point at a new list of cached constraints.
 *
 * In the normal case where nothing happened to the domain, this is cheap
 * enough that it's reasonable (and expected) to check before *each* use
 * of the constraint info.
 */
void
UpdateDomainConstraintRef(DomainConstraintRef *ref)
{
	TypeCacheEntry *typentry = ref->tcache;

	/* Make sure typcache entry's data is up to date */
	if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
		typentry->typtype == TYPTYPE_DOMAIN)
		load_domaintype_info(typentry);

	/* Transfer to ref object if there's new info, adjusting refcounts */
	if (ref->dcc != typentry->domainData)
	{
		/* Paranoia --- be sure link is nulled before trying to release */
		DomainConstraintCache *dcc = ref->dcc;

		if (dcc)
		{
			/*
			 * Note: we just leak the previous list of executable domain
			 * constraints.  Alternatively, we could keep those in a child
			 * context of ref->refctx and free that context at this point.
			 * However, in practice this code path will be taken so seldom
			 * that the extra bookkeeping for a child context doesn't seem
			 * worthwhile; we'll just allow a leak for the lifespan of refctx.
			 */
			ref->constraints = NIL;
			ref->dcc = NULL;
			decr_dcc_refcount(dcc);
		}
		/* Acquire a reference on the current constraint cache, if any */
		dcc = typentry->domainData;
		if (dcc)
		{
			ref->dcc = dcc;
			dcc->dccRefCount++;
			if (ref->need_exprstate)
				ref->constraints = prep_domain_constraints(dcc->constraints,
														   ref->refctx);
			else
				ref->constraints = dcc->constraints;
		}
	}
}
1481 :
1482 : /*
1483 : * DomainHasConstraints --- utility routine to check if a domain has constraints
1484 : *
1485 : * This is defined to return false, not fail, if type is not a domain.
1486 : */
1487 : bool
1488 30074 : DomainHasConstraints(Oid type_id)
1489 : {
1490 : TypeCacheEntry *typentry;
1491 :
1492 : /*
1493 : * Note: a side effect is to cause the typcache's domain data to become
1494 : * valid. This is fine since we'll likely need it soon if there is any.
1495 : */
1496 30074 : typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
1497 :
1498 30074 : return (typentry->domainData != NULL);
1499 : }
1500 :
1501 :
1502 : /*
1503 : * array_element_has_equality and friends are helper routines to check
1504 : * whether we should believe that array_eq and related functions will work
1505 : * on the given array type or composite type.
1506 : *
1507 : * The logic above may call these repeatedly on the same type entry, so we
1508 : * make use of the typentry->flags field to cache the results once known.
1509 : * Also, we assume that we'll probably want all these facts about the type
1510 : * if we want any, so we cache them all using only one lookup of the
1511 : * component datatype(s).
1512 : */
1513 :
1514 : static bool
1515 2928 : array_element_has_equality(TypeCacheEntry *typentry)
1516 : {
1517 2928 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1518 2548 : cache_array_element_properties(typentry);
1519 2928 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1520 : }
1521 :
1522 : static bool
1523 5440 : array_element_has_compare(TypeCacheEntry *typentry)
1524 : {
1525 5440 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1526 428 : cache_array_element_properties(typentry);
1527 5440 : return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1528 : }
1529 :
1530 : static bool
1531 1892 : array_element_has_hashing(TypeCacheEntry *typentry)
1532 : {
1533 1892 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1534 0 : cache_array_element_properties(typentry);
1535 1892 : return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1536 : }
1537 :
1538 : static bool
1539 364 : array_element_has_extended_hashing(TypeCacheEntry *typentry)
1540 : {
1541 364 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1542 0 : cache_array_element_properties(typentry);
1543 364 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1544 : }
1545 :
1546 : static void
1547 2976 : cache_array_element_properties(TypeCacheEntry *typentry)
1548 : {
1549 2976 : Oid elem_type = get_base_element_type(typentry->type_id);
1550 :
1551 2976 : if (OidIsValid(elem_type))
1552 : {
1553 : TypeCacheEntry *elementry;
1554 :
1555 2774 : elementry = lookup_type_cache(elem_type,
1556 : TYPECACHE_EQ_OPR |
1557 : TYPECACHE_CMP_PROC |
1558 : TYPECACHE_HASH_PROC |
1559 : TYPECACHE_HASH_EXTENDED_PROC);
1560 2774 : if (OidIsValid(elementry->eq_opr))
1561 2598 : typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1562 2774 : if (OidIsValid(elementry->cmp_proc))
1563 2400 : typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1564 2774 : if (OidIsValid(elementry->hash_proc))
1565 2586 : typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1566 2774 : if (OidIsValid(elementry->hash_extended_proc))
1567 2586 : typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1568 : }
1569 2976 : typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1570 2976 : }
1571 :
1572 : /*
1573 : * Likewise, some helper functions for composite types.
1574 : */
1575 :
1576 : static bool
1577 438 : record_fields_have_equality(TypeCacheEntry *typentry)
1578 : {
1579 438 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1580 410 : cache_record_field_properties(typentry);
1581 438 : return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1582 : }
1583 :
1584 : static bool
1585 514 : record_fields_have_compare(TypeCacheEntry *typentry)
1586 : {
1587 514 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1588 60 : cache_record_field_properties(typentry);
1589 514 : return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1590 : }
1591 :
1592 : static bool
1593 422 : record_fields_have_hashing(TypeCacheEntry *typentry)
1594 : {
1595 422 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1596 6 : cache_record_field_properties(typentry);
1597 422 : return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
1598 : }
1599 :
1600 : static bool
1601 190 : record_fields_have_extended_hashing(TypeCacheEntry *typentry)
1602 : {
1603 190 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1604 0 : cache_record_field_properties(typentry);
1605 190 : return (typentry->flags & TCFLAGS_HAVE_FIELD_EXTENDED_HASHING) != 0;
1606 : }
1607 :
/*
 * cache_record_field_properties --- determine whether a composite type's
 * fields all support equality/compare/hashing, and record the results in
 * the typcache entry's HAVE_FIELD_* flag bits.
 */
static void
cache_record_field_properties(TypeCacheEntry *typentry)
{
	/*
	 * For type RECORD, we can't really tell what will work, since we don't
	 * have access here to the specific anonymous type.  Just assume that
	 * equality and comparison will (we may get a failure at runtime).  We
	 * could also claim that hashing works, but then if code that has the
	 * option between a comparison-based (sort-based) and a hash-based plan
	 * chooses hashing, stuff could fail that would otherwise work if it chose
	 * a comparison-based plan.  In practice more types support comparison
	 * than hashing.
	 */
	if (typentry->type_id == RECORDOID)
	{
		typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
							TCFLAGS_HAVE_FIELD_COMPARE);
	}
	else if (typentry->typtype == TYPTYPE_COMPOSITE)
	{
		TupleDesc	tupdesc;
		int			newflags;
		int			i;

		/* Fetch composite type's tupdesc if we don't have it already */
		if (typentry->tupDesc == NULL)
			load_typcache_tupdesc(typentry);
		tupdesc = typentry->tupDesc;

		/* Must bump the refcount while we do additional catalog lookups */
		IncrTupleDescRefCount(tupdesc);

		/* Have each property if all non-dropped fields have the property */
		newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
					TCFLAGS_HAVE_FIELD_COMPARE |
					TCFLAGS_HAVE_FIELD_HASHING |
					TCFLAGS_HAVE_FIELD_EXTENDED_HASHING);
		for (i = 0; i < tupdesc->natts; i++)
		{
			TypeCacheEntry *fieldentry;
			Form_pg_attribute attr = TupleDescAttr(tupdesc, i);

			/* Dropped columns don't count against any property */
			if (attr->attisdropped)
				continue;

			fieldentry = lookup_type_cache(attr->atttypid,
										   TYPECACHE_EQ_OPR |
										   TYPECACHE_CMP_PROC |
										   TYPECACHE_HASH_PROC |
										   TYPECACHE_HASH_EXTENDED_PROC);
			/* Knock out each bit the field type fails to support */
			if (!OidIsValid(fieldentry->eq_opr))
				newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
			if (!OidIsValid(fieldentry->cmp_proc))
				newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
			if (!OidIsValid(fieldentry->hash_proc))
				newflags &= ~TCFLAGS_HAVE_FIELD_HASHING;
			if (!OidIsValid(fieldentry->hash_extended_proc))
				newflags &= ~TCFLAGS_HAVE_FIELD_EXTENDED_HASHING;

			/* We can drop out of the loop once we disprove all bits */
			if (newflags == 0)
				break;
		}
		typentry->flags |= newflags;

		DecrTupleDescRefCount(tupdesc);
	}
	else if (typentry->typtype == TYPTYPE_DOMAIN)
	{
		/* If it's domain over composite, copy base type's properties */
		TypeCacheEntry *baseentry;

		/* load up basetype info if we didn't already */
		if (typentry->domainBaseType == InvalidOid)
		{
			typentry->domainBaseTypmod = -1;
			typentry->domainBaseType =
				getBaseTypeAndTypmod(typentry->type_id,
									 &typentry->domainBaseTypmod);
		}
		baseentry = lookup_type_cache(typentry->domainBaseType,
									  TYPECACHE_EQ_OPR |
									  TYPECACHE_CMP_PROC |
									  TYPECACHE_HASH_PROC |
									  TYPECACHE_HASH_EXTENDED_PROC);
		if (baseentry->typtype == TYPTYPE_COMPOSITE)
		{
			typentry->flags |= TCFLAGS_DOMAIN_BASE_IS_COMPOSITE;
			typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
												   TCFLAGS_HAVE_FIELD_COMPARE |
												   TCFLAGS_HAVE_FIELD_HASHING |
												   TCFLAGS_HAVE_FIELD_EXTENDED_HASHING);
		}
	}
	/* In all cases, mark the field properties as determined */
	typentry->flags |= TCFLAGS_CHECKED_FIELD_PROPERTIES;
}
1704 :
1705 : /*
1706 : * Likewise, some helper functions for range and multirange types.
1707 : *
1708 : * We can borrow the flag bits for array element properties to use for range
1709 : * element properties, since those flag bits otherwise have no use in a
1710 : * range or multirange type's typcache entry.
1711 : */
1712 :
1713 : static bool
1714 120 : range_element_has_hashing(TypeCacheEntry *typentry)
1715 : {
1716 120 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1717 120 : cache_range_element_properties(typentry);
1718 120 : return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1719 : }
1720 :
1721 : static bool
1722 0 : range_element_has_extended_hashing(TypeCacheEntry *typentry)
1723 : {
1724 0 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1725 0 : cache_range_element_properties(typentry);
1726 0 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1727 : }
1728 :
/*
 * cache_range_element_properties
 *
 * Determine whether this range type's element (subtype) supports regular
 * and/or extended hashing, and record the answers in the borrowed
 * TCFLAGS_HAVE_ELEM_* bits of the range type's own typcache entry.
 * TCFLAGS_CHECKED_ELEM_PROPERTIES is set in any case, so we do this work
 * at most once per entry.
 */
static void
cache_range_element_properties(TypeCacheEntry *typentry)
{
	/* load up subtype link if we didn't already */
	if (typentry->rngelemtype == NULL &&
		typentry->typtype == TYPTYPE_RANGE)
		load_rangetype_info(typentry);

	if (typentry->rngelemtype != NULL)
	{
		TypeCacheEntry *elementry;

		/* might need to calculate subtype's hash function properties */
		elementry = lookup_type_cache(typentry->rngelemtype->type_id,
									  TYPECACHE_HASH_PROC |
									  TYPECACHE_HASH_EXTENDED_PROC);
		if (OidIsValid(elementry->hash_proc))
			typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
		if (OidIsValid(elementry->hash_extended_proc))
			typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
	}
	/* Mark the check as done even if no element info was found */
	typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
}
1752 :
1753 : static bool
1754 18 : multirange_element_has_hashing(TypeCacheEntry *typentry)
1755 : {
1756 18 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1757 18 : cache_multirange_element_properties(typentry);
1758 18 : return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1759 : }
1760 :
1761 : static bool
1762 0 : multirange_element_has_extended_hashing(TypeCacheEntry *typentry)
1763 : {
1764 0 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1765 0 : cache_multirange_element_properties(typentry);
1766 0 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1767 : }
1768 :
/*
 * cache_multirange_element_properties
 *
 * Determine whether the element type of this multirange's underlying range
 * type supports regular and/or extended hashing, and record the answers in
 * the borrowed TCFLAGS_HAVE_ELEM_* bits of the multirange's typcache entry.
 * TCFLAGS_CHECKED_ELEM_PROPERTIES is set in any case, so we do this work
 * at most once per entry.
 */
static void
cache_multirange_element_properties(TypeCacheEntry *typentry)
{
	/* load up range link if we didn't already */
	if (typentry->rngtype == NULL &&
		typentry->typtype == TYPTYPE_MULTIRANGE)
		load_multirangetype_info(typentry);

	if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
	{
		TypeCacheEntry *elementry;

		/* might need to calculate subtype's hash function properties */
		elementry = lookup_type_cache(typentry->rngtype->rngelemtype->type_id,
									  TYPECACHE_HASH_PROC |
									  TYPECACHE_HASH_EXTENDED_PROC);
		if (OidIsValid(elementry->hash_proc))
			typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
		if (OidIsValid(elementry->hash_extended_proc))
			typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
	}
	/* Mark the check as done even if no element info was found */
	typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
}
1792 :
1793 : /*
1794 : * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
1795 : * to store 'typmod'.
1796 : */
1797 : static void
1798 17392 : ensure_record_cache_typmod_slot_exists(int32 typmod)
1799 : {
1800 17392 : if (RecordCacheArray == NULL)
1801 : {
1802 6852 : RecordCacheArray = (RecordCacheArrayEntry *)
1803 6852 : MemoryContextAllocZero(CacheMemoryContext,
1804 : 64 * sizeof(RecordCacheArrayEntry));
1805 6852 : RecordCacheArrayLen = 64;
1806 : }
1807 :
1808 17392 : if (typmod >= RecordCacheArrayLen)
1809 : {
1810 0 : int32 newlen = pg_nextpower2_32(typmod + 1);
1811 :
1812 0 : RecordCacheArray = repalloc0_array(RecordCacheArray,
1813 : RecordCacheArrayEntry,
1814 : RecordCacheArrayLen,
1815 : newlen);
1816 0 : RecordCacheArrayLen = newlen;
1817 : }
1818 17392 : }
1819 :
/*
 * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
 *
 * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
 * hasn't had its refcount bumped.
 *
 * For a named composite type (type_id != RECORDOID) we consult the regular
 * typcache; for an anonymous RECORD type we consult the local typmod array,
 * falling back to the shared typmod registry if this session is attached to
 * one (parallel query).
 */
static TupleDesc
lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
{
	if (type_id != RECORDOID)
	{
		/*
		 * It's a named composite type, so use the regular typcache.
		 */
		TypeCacheEntry *typentry;

		typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
		if (typentry->tupDesc == NULL && !noError)
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("type %s is not composite",
							format_type_be(type_id))));
		return typentry->tupDesc;
	}
	else
	{
		/*
		 * It's a transient record type, so look in our record-type table.
		 */
		if (typmod >= 0)
		{
			/* Is it already in our local cache? */
			if (typmod < RecordCacheArrayLen &&
				RecordCacheArray[typmod].tupdesc != NULL)
				return RecordCacheArray[typmod].tupdesc;

			/* Are we attached to a shared record typmod registry? */
			if (CurrentSession->shared_typmod_registry != NULL)
			{
				SharedTypmodTableEntry *entry;

				/* Try to find it in the shared typmod index. */
				entry = dshash_find(CurrentSession->shared_typmod_table,
									&typmod, false);
				if (entry != NULL)
				{
					TupleDesc	tupdesc;

					tupdesc = (TupleDesc)
						dsa_get_address(CurrentSession->area,
										entry->shared_tupdesc);
					Assert(typmod == tupdesc->tdtypmod);

					/* We may need to extend the local RecordCacheArray. */
					ensure_record_cache_typmod_slot_exists(typmod);

					/*
					 * Our local array can now point directly to the TupleDesc
					 * in shared memory, which is non-reference-counted.
					 */
					RecordCacheArray[typmod].tupdesc = tupdesc;
					Assert(tupdesc->tdrefcount == -1);

					/*
					 * We don't share tupdesc identifiers across processes, so
					 * assign one locally.
					 */
					RecordCacheArray[typmod].id = ++tupledesc_id_counter;

					/* Done with the shared-table entry; release its lock */
					dshash_release_lock(CurrentSession->shared_typmod_table,
										entry);

					return RecordCacheArray[typmod].tupdesc;
				}
			}
		}

		/* Negative typmod, or typmod not registered anywhere we can see */
		if (!noError)
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("record type has not been registered")));
		return NULL;
	}
}
1904 :
1905 : /*
1906 : * lookup_rowtype_tupdesc
1907 : *
1908 : * Given a typeid/typmod that should describe a known composite type,
1909 : * return the tuple descriptor for the type. Will ereport on failure.
1910 : * (Use ereport because this is reachable with user-specified OIDs,
1911 : * for example from record_in().)
1912 : *
1913 : * Note: on success, we increment the refcount of the returned TupleDesc,
1914 : * and log the reference in CurrentResourceOwner. Caller must call
1915 : * ReleaseTupleDesc when done using the tupdesc. (There are some
1916 : * cases in which the returned tupdesc is not refcounted, in which
1917 : * case PinTupleDesc/ReleaseTupleDesc are no-ops; but in these cases
1918 : * the tupdesc is guaranteed to live till process exit.)
1919 : */
1920 : TupleDesc
1921 73576 : lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
1922 : {
1923 : TupleDesc tupDesc;
1924 :
1925 73576 : tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1926 73574 : PinTupleDesc(tupDesc);
1927 73574 : return tupDesc;
1928 : }
1929 :
1930 : /*
1931 : * lookup_rowtype_tupdesc_noerror
1932 : *
1933 : * As above, but if the type is not a known composite type and noError
1934 : * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1935 : * type_id is passed, you'll get an ereport anyway.)
1936 : */
1937 : TupleDesc
1938 20 : lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1939 : {
1940 : TupleDesc tupDesc;
1941 :
1942 20 : tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1943 20 : if (tupDesc != NULL)
1944 20 : PinTupleDesc(tupDesc);
1945 20 : return tupDesc;
1946 : }
1947 :
1948 : /*
1949 : * lookup_rowtype_tupdesc_copy
1950 : *
1951 : * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1952 : * copied into the CurrentMemoryContext and is not reference-counted.
1953 : */
1954 : TupleDesc
1955 50754 : lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
1956 : {
1957 : TupleDesc tmp;
1958 :
1959 50754 : tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1960 50754 : return CreateTupleDescCopyConstr(tmp);
1961 : }
1962 :
/*
 * lookup_rowtype_tupdesc_domain
 *
 * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
 * a domain over a named composite type; so this is effectively equivalent to
 * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
 * except for being a tad faster.
 *
 * Note: the reason we don't fold the look-through-domain behavior into plain
 * lookup_rowtype_tupdesc() is that we want callers to know they might be
 * dealing with a domain. Otherwise they might construct a tuple that should
 * be of the domain type, but not apply domain constraints.
 */
TupleDesc
lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
{
	TupleDesc	tupDesc;

	if (type_id != RECORDOID)
	{
		/*
		 * Check for domain or named composite type. We might as well load
		 * whichever data is needed.
		 */
		TypeCacheEntry *typentry;

		typentry = lookup_type_cache(type_id,
									 TYPECACHE_TUPDESC |
									 TYPECACHE_DOMAIN_BASE_INFO);
		if (typentry->typtype == TYPTYPE_DOMAIN)
			/* Recurse (once) on the domain's base type; it pins the result */
			return lookup_rowtype_tupdesc_noerror(typentry->domainBaseType,
												  typentry->domainBaseTypmod,
												  noError);
		if (typentry->tupDesc == NULL && !noError)
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("type %s is not composite",
							format_type_be(type_id))));
		tupDesc = typentry->tupDesc;
	}
	else
		tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
	/* Pin only on success; with noError we may be returning NULL */
	if (tupDesc != NULL)
		PinTupleDesc(tupDesc);
	return tupDesc;
}
2009 :
2010 : /*
2011 : * Hash function for the hash table of RecordCacheEntry.
2012 : */
2013 : static uint32
2014 412054 : record_type_typmod_hash(const void *data, size_t size)
2015 : {
2016 412054 : RecordCacheEntry *entry = (RecordCacheEntry *) data;
2017 :
2018 412054 : return hashRowType(entry->tupdesc);
2019 : }
2020 :
2021 : /*
2022 : * Match function for the hash table of RecordCacheEntry.
2023 : */
2024 : static int
2025 385672 : record_type_typmod_compare(const void *a, const void *b, size_t size)
2026 : {
2027 385672 : RecordCacheEntry *left = (RecordCacheEntry *) a;
2028 385672 : RecordCacheEntry *right = (RecordCacheEntry *) b;
2029 :
2030 385672 : return equalRowTypes(left->tupdesc, right->tupdesc) ? 0 : 1;
2031 : }
2032 :
/*
 * assign_record_type_typmod
 *
 * Given a tuple descriptor for a RECORD type, find or create a cache entry
 * for the type, and set the tupdesc's tdtypmod field to a value that will
 * identify this cache entry to lookup_rowtype_tupdesc.
 */
void
assign_record_type_typmod(TupleDesc tupDesc)
{
	RecordCacheEntry *recentry;
	TupleDesc	entDesc;
	bool		found;
	MemoryContext oldcxt;

	Assert(tupDesc->tdtypeid == RECORDOID);

	if (RecordCacheHash == NULL)
	{
		/* First time through: initialize the hash table */
		HASHCTL		ctl;

		ctl.keysize = sizeof(TupleDesc); /* just the pointer */
		ctl.entrysize = sizeof(RecordCacheEntry);
		ctl.hash = record_type_typmod_hash;
		ctl.match = record_type_typmod_compare;
		RecordCacheHash = hash_create("Record information cache", 64,
									  &ctl,
									  HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);

		/* Also make sure CacheMemoryContext exists */
		if (!CacheMemoryContext)
			CreateCacheMemoryContext();
	}

	/*
	 * Find a hashtable entry for this tuple descriptor. We don't use
	 * HASH_ENTER yet, because if it's missing, we need to make sure that all
	 * the allocations succeed before we create the new entry.
	 */
	recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
												&tupDesc,
												HASH_FIND, &found);
	if (found && recentry->tupdesc != NULL)
	{
		/* Already known: just copy the assigned typmod to the caller */
		tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
		return;
	}

	/* Not present, so need to manufacture an entry */
	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	/* Look in the SharedRecordTypmodRegistry, if attached */
	entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
	if (entDesc == NULL)
	{
		/*
		 * Make sure we have room before we CreateTupleDescCopy() or advance
		 * NextRecordTypmod.
		 */
		ensure_record_cache_typmod_slot_exists(NextRecordTypmod);

		/* Reference-counted local cache only. */
		entDesc = CreateTupleDescCopy(tupDesc);
		entDesc->tdrefcount = 1;	/* the cache's own permanent reference */
		entDesc->tdtypmod = NextRecordTypmod++;
	}
	else
	{
		/* Shared tupdesc already carries a typmod; make room for that slot */
		ensure_record_cache_typmod_slot_exists(entDesc->tdtypmod);
	}

	RecordCacheArray[entDesc->tdtypmod].tupdesc = entDesc;

	/* Assign a unique tupdesc identifier, too. */
	RecordCacheArray[entDesc->tdtypmod].id = ++tupledesc_id_counter;

	/* Fully initialized; create the hash table entry */
	recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
												&tupDesc,
												HASH_ENTER, NULL);
	recentry->tupdesc = entDesc;

	/* Update the caller's tuple descriptor. */
	tupDesc->tdtypmod = entDesc->tdtypmod;

	MemoryContextSwitchTo(oldcxt);
}
2121 :
/*
 * assign_record_type_identifier
 *
 * Get an identifier, which will be unique over the lifespan of this backend
 * process, for the current tuple descriptor of the specified composite type.
 * For named composite types, the value is guaranteed to change if the type's
 * definition does. For registered RECORD types, the value will not change
 * once assigned, since the registered type won't either. If an anonymous
 * RECORD type is specified, we return a new identifier on each call.
 */
uint64
assign_record_type_identifier(Oid type_id, int32 typmod)
{
	if (type_id != RECORDOID)
	{
		/*
		 * It's a named composite type, so use the regular typcache.
		 */
		TypeCacheEntry *typentry;

		typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
		if (typentry->tupDesc == NULL)
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("type %s is not composite",
							format_type_be(type_id))));
		Assert(typentry->tupDesc_identifier != 0);
		return typentry->tupDesc_identifier;
	}
	else
	{
		/*
		 * It's a transient record type, so look in our record-type table.
		 */
		if (typmod >= 0 && typmod < RecordCacheArrayLen &&
			RecordCacheArray[typmod].tupdesc != NULL)
		{
			Assert(RecordCacheArray[typmod].id != 0);
			return RecordCacheArray[typmod].id;
		}

		/* For anonymous or unrecognized record type, generate a new ID */
		return ++tupledesc_id_counter;
	}
}
2167 :
2168 : /*
2169 : * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
2170 : * This exists only to avoid exposing private innards of
2171 : * SharedRecordTypmodRegistry in a header.
2172 : */
2173 : size_t
2174 138 : SharedRecordTypmodRegistryEstimate(void)
2175 : {
2176 138 : return sizeof(SharedRecordTypmodRegistry);
2177 : }
2178 :
/*
 * Initialize 'registry' in a pre-existing shared memory region, which must be
 * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
 * bytes.
 *
 * 'area' will be used to allocate shared memory space as required for the
 * typemod registration. The current process, expected to be a leader process
 * in a parallel query, will be attached automatically and its current record
 * types will be loaded into *registry. While attached, all calls to
 * assign_record_type_typmod will use the shared registry. Worker backends
 * will need to attach explicitly.
 *
 * Note that this function takes 'area' and 'segment' as arguments rather than
 * accessing them via CurrentSession, because they aren't installed there
 * until after this function runs.
 */
void
SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry,
							   dsm_segment *segment,
							   dsa_area *area)
{
	MemoryContext old_context;
	dshash_table *record_table;
	dshash_table *typmod_table;
	int32		typmod;

	Assert(!IsParallelWorker());

	/* We can't already be attached to a shared registry. */
	Assert(CurrentSession->shared_typmod_registry == NULL);
	Assert(CurrentSession->shared_record_table == NULL);
	Assert(CurrentSession->shared_typmod_table == NULL);

	/* dshash handles must live for the whole backend, not a query context */
	old_context = MemoryContextSwitchTo(TopMemoryContext);

	/* Create the hash table of tuple descriptors indexed by themselves. */
	record_table = dshash_create(area, &srtr_record_table_params, area);

	/* Create the hash table of tuple descriptors indexed by typmod. */
	typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);

	MemoryContextSwitchTo(old_context);

	/* Initialize the SharedRecordTypmodRegistry. */
	registry->record_table_handle = dshash_get_hash_table_handle(record_table);
	registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
	pg_atomic_init_u32(&registry->next_typmod, NextRecordTypmod);

	/*
	 * Copy all entries from this backend's private registry into the shared
	 * registry.
	 */
	for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
	{
		SharedTypmodTableEntry *typmod_table_entry;
		SharedRecordTableEntry *record_table_entry;
		SharedRecordTableKey record_table_key;
		dsa_pointer shared_dp;
		TupleDesc	tupdesc;
		bool		found;

		tupdesc = RecordCacheArray[typmod].tupdesc;
		if (tupdesc == NULL)
			continue;

		/* Copy the TupleDesc into shared memory. */
		shared_dp = share_tupledesc(area, tupdesc, typmod);

		/* Insert into the typmod table. */
		typmod_table_entry = dshash_find_or_insert(typmod_table,
												   &tupdesc->tdtypmod,
												   &found);
		if (found)
			elog(ERROR, "cannot create duplicate shared record typmod");
		typmod_table_entry->typmod = tupdesc->tdtypmod;
		typmod_table_entry->shared_tupdesc = shared_dp;
		dshash_release_lock(typmod_table, typmod_table_entry);

		/* Insert into the record table. */
		record_table_key.shared = false;
		record_table_key.u.local_tupdesc = tupdesc;
		record_table_entry = dshash_find_or_insert(record_table,
												   &record_table_key,
												   &found);
		if (!found)
		{
			/* Store the shared form of the key in the shared table */
			record_table_entry->key.shared = true;
			record_table_entry->key.u.shared_tupdesc = shared_dp;
		}
		dshash_release_lock(record_table, record_table_entry);
	}

	/*
	 * Set up the global state that will tell assign_record_type_typmod and
	 * lookup_rowtype_tupdesc_internal about the shared registry.
	 */
	CurrentSession->shared_record_table = record_table;
	CurrentSession->shared_typmod_table = typmod_table;
	CurrentSession->shared_typmod_registry = registry;

	/*
	 * We install a detach hook in the leader, but only to handle cleanup on
	 * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
	 * the memory, the leader process will use a shared registry until it
	 * exits.
	 */
	on_dsm_detach(segment, shared_record_typmod_registry_detach, (Datum) 0);
}
2287 :
/*
 * Attach to 'registry', which must have been initialized already by another
 * backend. Future calls to assign_record_type_typmod and
 * lookup_rowtype_tupdesc_internal will use the shared registry until the
 * current session is detached.
 */
void
SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
{
	MemoryContext old_context;
	dshash_table *record_table;
	dshash_table *typmod_table;

	Assert(IsParallelWorker());

	/* We can't already be attached to a shared registry. */
	Assert(CurrentSession != NULL);
	Assert(CurrentSession->segment != NULL);
	Assert(CurrentSession->area != NULL);
	Assert(CurrentSession->shared_typmod_registry == NULL);
	Assert(CurrentSession->shared_record_table == NULL);
	Assert(CurrentSession->shared_typmod_table == NULL);

	/*
	 * We can't already have typmods in our local cache, because they'd clash
	 * with those imported by SharedRecordTypmodRegistryInit. This should be
	 * a freshly started parallel worker. If we ever support worker
	 * recycling, a worker would need to zap its local cache in between
	 * servicing different queries, in order to be able to call this and
	 * synchronize typmods with a new leader; but that's problematic because
	 * we can't be very sure that record-typmod-related state hasn't escaped
	 * to anywhere else in the process.
	 */
	Assert(NextRecordTypmod == 0);

	/* dshash handles must live for the whole backend, not a query context */
	old_context = MemoryContextSwitchTo(TopMemoryContext);

	/* Attach to the two hash tables. */
	record_table = dshash_attach(CurrentSession->area,
								 &srtr_record_table_params,
								 registry->record_table_handle,
								 CurrentSession->area);
	typmod_table = dshash_attach(CurrentSession->area,
								 &srtr_typmod_table_params,
								 registry->typmod_table_handle,
								 NULL);

	MemoryContextSwitchTo(old_context);

	/*
	 * Set up detach hook to run at worker exit. Currently this is the same
	 * as the leader's detach hook, but in future they might need to be
	 * different.
	 */
	on_dsm_detach(CurrentSession->segment,
				  shared_record_typmod_registry_detach,
				  PointerGetDatum(registry));

	/*
	 * Set up the session state that will tell assign_record_type_typmod and
	 * lookup_rowtype_tupdesc_internal about the shared registry.
	 */
	CurrentSession->shared_typmod_registry = registry;
	CurrentSession->shared_record_table = record_table;
	CurrentSession->shared_typmod_table = typmod_table;
}
2354 :
/*
 * InvalidateCompositeTypeCacheEntry
 *		Invalidate particular TypeCacheEntry on Relcache inval callback
 *
 * Delete the cached tuple descriptor (if any) for the given composite
 * type, and reset whatever info we have cached about the composite type's
 * comparability.
 */
static void
InvalidateCompositeTypeCacheEntry(TypeCacheEntry *typentry)
{
	bool		hadTupDescOrOpclass;

	Assert(typentry->typtype == TYPTYPE_COMPOSITE &&
		   OidIsValid(typentry->typrelid));

	/* Remember whether there is anything to clean, for the check below */
	hadTupDescOrOpclass = (typentry->tupDesc != NULL) ||
		(typentry->flags & TCFLAGS_OPERATOR_FLAGS);

	/* Delete tupdesc if we have it */
	if (typentry->tupDesc != NULL)
	{
		/*
		 * Release our refcount and free the tupdesc if none remain. We can't
		 * use DecrTupleDescRefCount here because this reference is not logged
		 * by the current resource owner.
		 */
		Assert(typentry->tupDesc->tdrefcount > 0);
		if (--typentry->tupDesc->tdrefcount == 0)
			FreeTupleDesc(typentry->tupDesc);
		typentry->tupDesc = NULL;

		/*
		 * Also clear tupDesc_identifier, so that anyone watching it will
		 * realize that the tupdesc has changed.
		 */
		typentry->tupDesc_identifier = 0;
	}

	/* Reset equality/comparison/hashing validity information */
	typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;

	/*
	 * Call delete_rel_type_cache_if_needed() if we actually cleared
	 * something.
	 */
	if (hadTupDescOrOpclass)
		delete_rel_type_cache_if_needed(typentry);
}
2404 :
/*
 * TypeCacheRelCallback
 *		Relcache inval callback function
 *
 * Delete the cached tuple descriptor (if any) for the given rel's composite
 * type, or for all composite types if relid == InvalidOid. Also reset
 * whatever info we have cached about the composite type's comparability.
 *
 * This is called when a relcache invalidation event occurs for the given
 * relid. We can't use syscache to find a type corresponding to the given
 * relation because the code can be called outside of transaction. Thus, we
 * use the RelIdToTypeIdCacheHash map to locate appropriate typcache entry.
 */
static void
TypeCacheRelCallback(Datum arg, Oid relid)
{
	TypeCacheEntry *typentry;

	/*
	 * RelIdToTypeIdCacheHash and TypeCacheHash should exist, otherwise this
	 * callback wouldn't be registered
	 */
	if (OidIsValid(relid))
	{
		RelIdToTypeIdCacheEntry *relentry;

		/*
		 * Find an RelIdToTypeIdCacheHash entry, which should exist as soon as
		 * corresponding typcache entry has something to clean.
		 */
		relentry = (RelIdToTypeIdCacheEntry *) hash_search(RelIdToTypeIdCacheHash,
														   &relid,
														   HASH_FIND, NULL);

		if (relentry != NULL)
		{
			typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
													  &relentry->composite_typid,
													  HASH_FIND, NULL);

			if (typentry != NULL)
			{
				Assert(typentry->typtype == TYPTYPE_COMPOSITE);
				Assert(relid == typentry->typrelid);

				InvalidateCompositeTypeCacheEntry(typentry);
			}
		}

		/*
		 * Visit all the domain types sequentially. Typically, this shouldn't
		 * affect performance since domain types are less tended to bloat.
		 * Domain types are created manually, unlike composite types which are
		 * automatically created for every temporary table.
		 */
		for (typentry = firstDomainTypeEntry;
			 typentry != NULL;
			 typentry = typentry->nextDomain)
		{
			/*
			 * If it's domain over composite, reset flags. (We don't bother
			 * trying to determine whether the specific base type needs a
			 * reset.) Note that if we haven't determined whether the base
			 * type is composite, we don't need to reset anything.
			 */
			if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
				typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
		}
	}
	else
	{
		HASH_SEQ_STATUS status;

		/*
		 * Relid is invalid. By convention, we need to reset all composite
		 * types in cache. Also, we should reset flags for domain types, and
		 * we loop over all entries in hash, so, do it in a single scan.
		 */
		hash_seq_init(&status, TypeCacheHash);
		while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
		{
			if (typentry->typtype == TYPTYPE_COMPOSITE)
			{
				InvalidateCompositeTypeCacheEntry(typentry);
			}
			else if (typentry->typtype == TYPTYPE_DOMAIN)
			{
				/*
				 * If it's domain over composite, reset flags. (We don't
				 * bother trying to determine whether the specific base type
				 * needs a reset.) Note that if we haven't determined whether
				 * the base type is composite, we don't need to reset
				 * anything.
				 */
				if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
					typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
			}
		}
	}
}
2505 :
/*
 * TypeCacheTypCallback
 *		Syscache inval callback function
 *
 * This is called when a syscache invalidation event occurs for any
 * pg_type row. If we have information cached about that type, mark
 * it as needing to be reloaded.
 */
static void
TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
{
	HASH_SEQ_STATUS status;
	TypeCacheEntry *typentry;

	/* TypeCacheHash must exist, else this callback wouldn't be registered */

	/*
	 * By convention, zero hash value is passed to the callback as a sign that
	 * it's time to invalidate the whole cache. See sinval.c, inval.c and
	 * InvalidateSystemCachesExtended().
	 */
	if (hashvalue == 0)
		hash_seq_init(&status, TypeCacheHash);
	else
		hash_seq_init_with_hash_value(&status, TypeCacheHash, hashvalue);

	while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
	{
		/* Remember whether pg_type data was valid before we clear it */
		bool		hadPgTypeData = (typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA);

		Assert(hashvalue == 0 || typentry->type_id_hash == hashvalue);

		/*
		 * Mark the data obtained directly from pg_type as invalid. Also, if
		 * it's a domain, typnotnull might've changed, so we'll need to
		 * recalculate its constraints.
		 */
		typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
							 TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS);

		/*
		 * Call delete_rel_type_cache_if_needed() if we cleaned
		 * TCFLAGS_HAVE_PG_TYPE_DATA flag previously.
		 */
		if (hadPgTypeData)
			delete_rel_type_cache_if_needed(typentry);
	}
}
2554 :
2555 : /*
2556 : * TypeCacheOpcCallback
2557 : * Syscache inval callback function
2558 : *
2559 : * This is called when a syscache invalidation event occurs for any pg_opclass
2560 : * row. In principle we could probably just invalidate data dependent on the
2561 : * particular opclass, but since updates on pg_opclass are rare in production
2562 : * it doesn't seem worth a lot of complication: we just mark all cached data
2563 : * invalid.
2564 : *
2565 : * Note that we don't bother watching for updates on pg_amop or pg_amproc.
2566 : * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
2567 : * is not allowed to be used to add/drop the primary operators and functions
2568 : * of an opclass, only cross-type members of a family; and the latter sorts
2569 : * of members are not going to get cached here.
2570 : */
2571 : static void
2572 2662 : TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
2573 : {
2574 : HASH_SEQ_STATUS status;
2575 : TypeCacheEntry *typentry;
2576 :
2577 : /* TypeCacheHash must exist, else this callback wouldn't be registered */
2578 2662 : hash_seq_init(&status, TypeCacheHash);
2579 17000 : while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2580 : {
2581 11676 : bool hadOpclass = (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2582 :
2583 : /* Reset equality/comparison/hashing validity information */
2584 11676 : typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2585 :
2586 : /*
2587 : * Call delete_rel_type_cache_if_needed() if we actually cleared some
2588 : * of TCFLAGS_OPERATOR_FLAGS.
2589 : */
2590 11676 : if (hadOpclass)
2591 2072 : delete_rel_type_cache_if_needed(typentry);
2592 : }
2593 2662 : }
2594 :
2595 : /*
2596 : * TypeCacheConstrCallback
2597 : * Syscache inval callback function
2598 : *
2599 : * This is called when a syscache invalidation event occurs for any
2600 : * pg_constraint row. We flush information about domain constraints
2601 : * when this happens.
2602 : *
2603 : * It's slightly annoying that we can't tell whether the inval event was for
2604 : * a domain constraint record or not; there's usually more update traffic
2605 : * for table constraints than domain constraints, so we'll do a lot of
2606 : * useless flushes. Still, this is better than the old no-caching-at-all
2607 : * approach to domain constraints.
2608 : */
2609 : static void
2610 194614 : TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
2611 : {
2612 : TypeCacheEntry *typentry;
2613 :
2614 : /*
2615 : * Because this is called very frequently, and typically very few of the
2616 : * typcache entries are for domains, we don't use hash_seq_search here.
2617 : * Instead we thread all the domain-type entries together so that we can
2618 : * visit them cheaply.
2619 : */
2620 194614 : for (typentry = firstDomainTypeEntry;
2621 379442 : typentry != NULL;
2622 184828 : typentry = typentry->nextDomain)
2623 : {
2624 : /* Reset domain constraint validity information */
2625 184828 : typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
2626 : }
2627 194614 : }
2628 :
2629 :
2630 : /*
2631 : * Check if given OID is part of the subset that's sortable by comparisons
2632 : */
2633 : static inline bool
2634 303858 : enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
2635 : {
2636 : Oid offset;
2637 :
2638 303858 : if (arg < enumdata->bitmap_base)
2639 0 : return false;
2640 303858 : offset = arg - enumdata->bitmap_base;
2641 303858 : if (offset > (Oid) INT_MAX)
2642 0 : return false;
2643 303858 : return bms_is_member((int) offset, enumdata->sorted_values);
2644 : }
2645 :
2646 :
/*
 * compare_values_of_enum
 *		Compare two members of an enum type.
 *		Return <0, 0, or >0 according as arg1 <, =, or > arg2.
 *
 * Note: currently, the enumData cache is refreshed only if we are asked
 * to compare an enum value that is not already in the cache.  This is okay
 * because there is no support for re-ordering existing values, so comparisons
 * of previously cached values will return the right answer even if other
 * values have been added since we last loaded the cache.
 *
 * Note: the enum logic has a special-case rule about even-numbered versus
 * odd-numbered OIDs, but we take no account of that rule here; this
 * routine shouldn't even get called when that rule applies.
 */
int
compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
{
	TypeCacheEnumData *enumdata;
	EnumItem   *item1;
	EnumItem   *item2;

	/*
	 * Equal OIDs are certainly equal --- this case was probably handled by
	 * our caller, but we may as well check.
	 */
	if (arg1 == arg2)
		return 0;

	/* Load up the cache if first time through */
	if (tcache->enumData == NULL)
		load_enum_cache_data(tcache);
	enumdata = tcache->enumData;

	/*
	 * If both OIDs are known-sorted, we can just compare them directly.
	 * This is the cheap path: no binary search needed.
	 */
	if (enum_known_sorted(enumdata, arg1) &&
		enum_known_sorted(enumdata, arg2))
	{
		/* arg1 == arg2 was handled above, so strict comparison suffices */
		if (arg1 < arg2)
			return -1;
		else
			return 1;
	}

	/*
	 * Slow path: we have to identify their actual sort-order positions
	 * via binary search of the OID-sorted EnumItem array.
	 */
	item1 = find_enumitem(enumdata, arg1);
	item2 = find_enumitem(enumdata, arg2);

	if (item1 == NULL || item2 == NULL)
	{
		/*
		 * We couldn't find one or both values.  That means the enum has
		 * changed under us, so re-initialize the cache and try again.  We
		 * don't bother retrying the known-sorted case in this path.
		 */
		load_enum_cache_data(tcache);
		enumdata = tcache->enumData;

		item1 = find_enumitem(enumdata, arg1);
		item2 = find_enumitem(enumdata, arg2);

		/*
		 * If we still can't find the values, complain: we must have corrupt
		 * data.
		 */
		if (item1 == NULL)
			elog(ERROR, "enum value %u not found in cache for enum %s",
				 arg1, format_type_be(tcache->type_id));
		if (item2 == NULL)
			elog(ERROR, "enum value %u not found in cache for enum %s",
				 arg2, format_type_be(tcache->type_id));
	}

	/* Compare by the authoritative sort_order positions */
	if (item1->sort_order < item2->sort_order)
		return -1;
	else if (item1->sort_order > item2->sort_order)
		return 1;
	else
		return 0;
}
2731 :
/*
 * Load (or re-load) the enumData member of the typcache entry.
 *
 * Reads all pg_enum rows for the type, builds an OID-sorted EnumItem array
 * plus a bitmap identifying a subset of OIDs whose sort order matches OID
 * order, and installs the result in CacheMemoryContext.
 */
static void
load_enum_cache_data(TypeCacheEntry *tcache)
{
	TypeCacheEnumData *enumdata;
	Relation	enum_rel;
	SysScanDesc enum_scan;
	HeapTuple	enum_tuple;
	ScanKeyData skey;
	EnumItem   *items;
	int			numitems;
	int			maxitems;
	Oid			bitmap_base;
	Bitmapset  *bitmap;
	MemoryContext oldcxt;
	int			bm_size,
				start_pos;

	/* Check that this is actually an enum */
	if (tcache->typtype != TYPTYPE_ENUM)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("%s is not an enum",
						format_type_be(tcache->type_id))));

	/*
	 * Read all the information for members of the enum type.  We collect the
	 * info in working memory in the caller's context, and then transfer it to
	 * permanent memory in CacheMemoryContext.  This minimizes the risk of
	 * leaking memory from CacheMemoryContext in the event of an error partway
	 * through.
	 */
	maxitems = 64;				/* initial array size; doubled as needed */
	items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
	numitems = 0;

	/* Scan pg_enum for the members of the target enum type. */
	ScanKeyInit(&skey,
				Anum_pg_enum_enumtypid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(tcache->type_id));

	enum_rel = table_open(EnumRelationId, AccessShareLock);
	enum_scan = systable_beginscan(enum_rel,
								   EnumTypIdLabelIndexId,
								   true, NULL,
								   1, &skey);

	while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
	{
		Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);

		/* Grow the working array if it's full */
		if (numitems >= maxitems)
		{
			maxitems *= 2;
			items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
		}
		items[numitems].enum_oid = en->oid;
		items[numitems].sort_order = en->enumsortorder;
		numitems++;
	}

	systable_endscan(enum_scan);
	table_close(enum_rel, AccessShareLock);

	/* Sort the items into OID order (required for bsearch in find_enumitem) */
	qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);

	/*
	 * Here, we create a bitmap listing a subset of the enum's OIDs that are
	 * known to be in order and can thus be compared with just OID comparison.
	 *
	 * The point of this is that the enum's initial OIDs were certainly in
	 * order, so there is some subset that can be compared via OID comparison;
	 * and we'd rather not do binary searches unnecessarily.
	 *
	 * This is somewhat heuristic, and might identify a subset of OIDs that
	 * isn't exactly what the type started with.  That's okay as long as the
	 * subset is correctly sorted.
	 */
	bitmap_base = InvalidOid;
	bitmap = NULL;
	bm_size = 1;				/* only save sets of at least 2 OIDs */

	for (start_pos = 0; start_pos < numitems - 1; start_pos++)
	{
		/*
		 * Identify longest sorted subsequence starting at start_pos
		 */
		Bitmapset  *this_bitmap = bms_make_singleton(0);
		int			this_bm_size = 1;
		Oid			start_oid = items[start_pos].enum_oid;
		float4		prev_order = items[start_pos].sort_order;
		int			i;

		for (i = start_pos + 1; i < numitems; i++)
		{
			Oid			offset;

			offset = items[i].enum_oid - start_oid;
			/* quit if bitmap would be too large; cutoff is arbitrary */
			if (offset >= 8192)
				break;
			/* include the item if it's in-order */
			if (items[i].sort_order > prev_order)
			{
				prev_order = items[i].sort_order;
				this_bitmap = bms_add_member(this_bitmap, (int) offset);
				this_bm_size++;
			}
		}

		/* Remember it if larger than previous best */
		if (this_bm_size > bm_size)
		{
			bms_free(bitmap);
			bitmap_base = start_oid;
			bitmap = this_bitmap;
			bm_size = this_bm_size;
		}
		else
			bms_free(this_bitmap);

		/*
		 * Done if it's not possible to find a longer sequence in the rest of
		 * the list.  In typical cases this will happen on the first
		 * iteration, which is why we create the bitmaps on the fly instead of
		 * doing a second pass over the list.
		 */
		if (bm_size >= (numitems - start_pos - 1))
			break;
	}

	/* OK, copy the data into CacheMemoryContext */
	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
	enumdata = (TypeCacheEnumData *)
		palloc(offsetof(TypeCacheEnumData, enum_values) +
			   numitems * sizeof(EnumItem));
	enumdata->bitmap_base = bitmap_base;
	enumdata->sorted_values = bms_copy(bitmap);
	enumdata->num_values = numitems;
	memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
	MemoryContextSwitchTo(oldcxt);

	/* Release the working copies made in the caller's context */
	pfree(items);
	bms_free(bitmap);

	/* And link the finished cache struct into the typcache */
	if (tcache->enumData != NULL)
		pfree(tcache->enumData);
	tcache->enumData = enumdata;
}
2886 :
2887 : /*
2888 : * Locate the EnumItem with the given OID, if present
2889 : */
2890 : static EnumItem *
2891 304852 : find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
2892 : {
2893 : EnumItem srch;
2894 :
2895 : /* On some versions of Solaris, bsearch of zero items dumps core */
2896 304852 : if (enumdata->num_values <= 0)
2897 0 : return NULL;
2898 :
2899 304852 : srch.enum_oid = arg;
2900 304852 : return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2901 : sizeof(EnumItem), enum_oid_cmp);
2902 : }
2903 :
2904 : /*
2905 : * qsort comparison function for OID-ordered EnumItems
2906 : */
2907 : static int
2908 614378 : enum_oid_cmp(const void *left, const void *right)
2909 : {
2910 614378 : const EnumItem *l = (const EnumItem *) left;
2911 614378 : const EnumItem *r = (const EnumItem *) right;
2912 :
2913 614378 : return pg_cmp_u32(l->enum_oid, r->enum_oid);
2914 : }
2915 :
2916 : /*
2917 : * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
2918 : * to the given value and return a dsa_pointer.
2919 : */
2920 : static dsa_pointer
2921 174 : share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
2922 : {
2923 : dsa_pointer shared_dp;
2924 : TupleDesc shared;
2925 :
2926 174 : shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2927 174 : shared = (TupleDesc) dsa_get_address(area, shared_dp);
2928 174 : TupleDescCopy(shared, tupdesc);
2929 174 : shared->tdtypmod = typmod;
2930 :
2931 174 : return shared_dp;
2932 : }
2933 :
/*
 * If we are attached to a SharedRecordTypmodRegistry, use it to find or
 * create a shared TupleDesc that matches 'tupdesc'.  Otherwise return NULL.
 * Tuple descriptors returned by this function are not reference counted, and
 * will exist at least as long as the current backend remained attached to the
 * current session.
 */
static TupleDesc
find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
{
	TupleDesc	result;
	SharedRecordTableKey key;
	SharedRecordTableEntry *record_table_entry;
	SharedTypmodTableEntry *typmod_table_entry;
	dsa_pointer shared_dp;
	bool		found;
	uint32		typmod;

	/* If not even attached, nothing to do. */
	if (CurrentSession->shared_typmod_registry == NULL)
		return NULL;

	/*
	 * Try to find a matching tuple descriptor in the record table.  The key
	 * is marked non-shared because we're probing with a backend-local
	 * TupleDesc pointer.
	 */
	key.shared = false;
	key.u.local_tupdesc = tupdesc;
	record_table_entry = (SharedRecordTableEntry *)
		dshash_find(CurrentSession->shared_record_table, &key, false);
	if (record_table_entry)
	{
		Assert(record_table_entry->key.shared);
		/* Release the partition lock before dereferencing the dsa_pointer */
		dshash_release_lock(CurrentSession->shared_record_table,
							record_table_entry);
		result = (TupleDesc)
			dsa_get_address(CurrentSession->area,
							record_table_entry->key.u.shared_tupdesc);
		Assert(result->tdrefcount == -1);

		return result;
	}

	/* Allocate a new typmod number.  This will be wasted if we error out. */
	typmod = (int)
		pg_atomic_fetch_add_u32(&CurrentSession->shared_typmod_registry->next_typmod,
								1);

	/* Copy the TupleDesc into shared memory. */
	shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);

	/*
	 * Create an entry in the typmod table so that others will understand this
	 * typmod number.  On error we must free the shared copy ourselves, since
	 * nothing else references it yet.
	 */
	PG_TRY();
	{
		typmod_table_entry = (SharedTypmodTableEntry *)
			dshash_find_or_insert(CurrentSession->shared_typmod_table,
								  &typmod, &found);
		if (found)
			elog(ERROR, "cannot create duplicate shared record typmod");
	}
	PG_CATCH();
	{
		dsa_free(CurrentSession->area, shared_dp);
		PG_RE_THROW();
	}
	PG_END_TRY();
	typmod_table_entry->typmod = typmod;
	typmod_table_entry->shared_tupdesc = shared_dp;
	dshash_release_lock(CurrentSession->shared_typmod_table,
						typmod_table_entry);

	/*
	 * Finally create an entry in the record table so others with matching
	 * tuple descriptors can reuse the typmod.
	 */
	record_table_entry = (SharedRecordTableEntry *)
		dshash_find_or_insert(CurrentSession->shared_record_table, &key,
							  &found);
	if (found)
	{
		/*
		 * Someone concurrently inserted a matching tuple descriptor since the
		 * first time we checked.  Use that one instead.
		 */
		dshash_release_lock(CurrentSession->shared_record_table,
							record_table_entry);

		/* Might as well free up the space used by the one we created. */
		found = dshash_delete_key(CurrentSession->shared_typmod_table,
								  &typmod);
		Assert(found);
		dsa_free(CurrentSession->area, shared_dp);

		/* Return the one we found. */
		Assert(record_table_entry->key.shared);
		result = (TupleDesc)
			dsa_get_address(CurrentSession->area,
							record_table_entry->key.u.shared_tupdesc);
		Assert(result->tdrefcount == -1);

		return result;
	}

	/* Store it and return it. */
	record_table_entry->key.shared = true;
	record_table_entry->key.u.shared_tupdesc = shared_dp;
	dshash_release_lock(CurrentSession->shared_record_table,
						record_table_entry);
	result = (TupleDesc)
		dsa_get_address(CurrentSession->area, shared_dp);
	Assert(result->tdrefcount == -1);

	return result;
}
3048 :
3049 : /*
3050 : * On-DSM-detach hook to forget about the current shared record typmod
3051 : * infrastructure. This is currently used by both leader and workers.
3052 : */
3053 : static void
3054 2880 : shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
3055 : {
3056 : /* Be cautious here: maybe we didn't finish initializing. */
3057 2880 : if (CurrentSession->shared_record_table != NULL)
3058 : {
3059 2880 : dshash_detach(CurrentSession->shared_record_table);
3060 2880 : CurrentSession->shared_record_table = NULL;
3061 : }
3062 2880 : if (CurrentSession->shared_typmod_table != NULL)
3063 : {
3064 2880 : dshash_detach(CurrentSession->shared_typmod_table);
3065 2880 : CurrentSession->shared_typmod_table = NULL;
3066 : }
3067 2880 : CurrentSession->shared_typmod_registry = NULL;
3068 2880 : }
3069 :
3070 : /*
3071 : * Insert RelIdToTypeIdCacheHash entry if needed.
3072 : */
3073 : static void
3074 733932 : insert_rel_type_cache_if_needed(TypeCacheEntry *typentry)
3075 : {
3076 : /* Immediately quit for non-composite types */
3077 733932 : if (typentry->typtype != TYPTYPE_COMPOSITE)
3078 648622 : return;
3079 :
3080 : /* typrelid should be given for composite types */
3081 : Assert(OidIsValid(typentry->typrelid));
3082 :
3083 : /*
3084 : * Insert a RelIdToTypeIdCacheHash entry if the typentry have any
3085 : * information indicating it should be here.
3086 : */
3087 85310 : if ((typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) ||
3088 0 : (typentry->flags & TCFLAGS_OPERATOR_FLAGS) ||
3089 0 : typentry->tupDesc != NULL)
3090 : {
3091 : RelIdToTypeIdCacheEntry *relentry;
3092 : bool found;
3093 :
3094 85310 : relentry = (RelIdToTypeIdCacheEntry *) hash_search(RelIdToTypeIdCacheHash,
3095 85310 : &typentry->typrelid,
3096 : HASH_ENTER, &found);
3097 85310 : relentry->relid = typentry->typrelid;
3098 85310 : relentry->composite_typid = typentry->type_id;
3099 : }
3100 : }
3101 :
/*
 * Delete entry RelIdToTypeIdCacheHash if needed after resetting of the
 * TCFLAGS_HAVE_PG_TYPE_DATA flag, or any of TCFLAGS_OPERATOR_FLAGS,
 * or tupDesc.
 */
static void
delete_rel_type_cache_if_needed(TypeCacheEntry *typentry)
{
#ifdef USE_ASSERT_CHECKING
	int			i;
	bool		is_in_progress = false;

	/*
	 * Note whether this type's cache entry is still being built by an
	 * in-flight lookup_type_cache() call; in that case the mapping entry
	 * may legitimately be absent below.
	 */
	for (i = 0; i < in_progress_list_len; i++)
	{
		if (in_progress_list[i] == typentry->type_id)
		{
			is_in_progress = true;
			break;
		}
	}
#endif

	/* Immediately quit for non-composite types */
	if (typentry->typtype != TYPTYPE_COMPOSITE)
		return;

	/* typrelid should be given for composite types */
	Assert(OidIsValid(typentry->typrelid));

	/*
	 * Delete a RelIdToTypeIdCacheHash entry if the typentry doesn't have any
	 * information indicating entry should be still there.
	 */
	if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) &&
		!(typentry->flags & TCFLAGS_OPERATOR_FLAGS) &&
		typentry->tupDesc == NULL)
	{
		bool		found;

		(void) hash_search(RelIdToTypeIdCacheHash,
						   &typentry->typrelid,
						   HASH_REMOVE, &found);
		/* The entry must exist unless its creation is still in progress */
		Assert(found || is_in_progress);
	}
	else
	{
#ifdef USE_ASSERT_CHECKING
		/*
		 * In assert-enabled builds otherwise check for RelIdToTypeIdCacheHash
		 * entry if it should exist.
		 */
		bool		found;

		if (!is_in_progress)
		{
			(void) hash_search(RelIdToTypeIdCacheHash,
							   &typentry->typrelid,
							   HASH_FIND, &found);
			Assert(found);
		}
#endif
	}
}
3165 :
3166 : /*
3167 : * Add possibly missing RelIdToTypeId entries related to TypeCacheHash
3168 : * entries, marked as in-progress by lookup_type_cache(). It may happen
3169 : * in case of an error or interruption during the lookup_type_cache() call.
3170 : */
3171 : static void
3172 1184622 : finalize_in_progress_typentries(void)
3173 : {
3174 : int i;
3175 :
3176 1184624 : for (i = 0; i < in_progress_list_len; i++)
3177 : {
3178 : TypeCacheEntry *typentry;
3179 :
3180 2 : typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
3181 2 : &in_progress_list[i],
3182 : HASH_FIND, NULL);
3183 2 : if (typentry)
3184 2 : insert_rel_type_cache_if_needed(typentry);
3185 : }
3186 :
3187 1184622 : in_progress_list_len = 0;
3188 1184622 : }
3189 :
/*
 * End-of-transaction cleanup for the typcache: reconcile any typcache
 * entries left in-progress by an aborted lookup_type_cache() call.
 */
void
AtEOXact_TypeCache(void)
{
	finalize_in_progress_typentries();
}
3195 :
/*
 * End-of-subtransaction cleanup for the typcache: same reconciliation as
 * at top-level transaction end.
 */
void
AtEOSubXact_TypeCache(void)
{
	finalize_in_progress_typentries();
}
|