Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * typcache.c
4 : * POSTGRES type cache code
5 : *
6 : * The type cache exists to speed lookup of certain information about data
7 : * types that is not directly available from a type's pg_type row. For
8 : * example, we use a type's default btree opclass, or the default hash
9 : * opclass if no btree opclass exists, to determine which operators should
10 : * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
11 : *
12 : * Several seemingly-odd choices have been made to support use of the type
13 : * cache by generic array and record handling routines, such as array_eq(),
14 : * record_cmp(), and hash_array(). Because those routines are used as index
15 : * support operations, they cannot leak memory. To allow them to execute
16 : * efficiently, all information that they would like to re-use across calls
17 : * is kept in the type cache.
18 : *
19 : * Once created, a type cache entry lives as long as the backend does, so
20 : * there is no need for a call to release a cache entry. If the type is
21 : * dropped, the cache entry simply becomes wasted storage. This is not
22 : * expected to happen often, and assuming that typcache entries are good
23 : * permanently allows caching pointers to them in long-lived places.
24 : *
25 : * We have some provisions for updating cache entries if the stored data
26 : * becomes obsolete. Core data extracted from the pg_type row is updated
27 : * when we detect updates to pg_type. Information dependent on opclasses is
28 : * cleared if we detect updates to pg_opclass. We also support clearing the
29 : * tuple descriptor and operator/function parts of a rowtype's cache entry,
30 : * since those may need to change as a consequence of ALTER TABLE. Domain
31 : * constraint changes are also tracked properly.
32 : *
33 : *
34 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
35 : * Portions Copyright (c) 1994, Regents of the University of California
36 : *
37 : * IDENTIFICATION
38 : * src/backend/utils/cache/typcache.c
39 : *
40 : *-------------------------------------------------------------------------
41 : */
42 : #include "postgres.h"
43 :
44 : #include <limits.h>
45 :
46 : #include "access/hash.h"
47 : #include "access/htup_details.h"
48 : #include "access/nbtree.h"
49 : #include "access/parallel.h"
50 : #include "access/relation.h"
51 : #include "access/session.h"
52 : #include "access/table.h"
53 : #include "catalog/pg_am.h"
54 : #include "catalog/pg_constraint.h"
55 : #include "catalog/pg_enum.h"
56 : #include "catalog/pg_operator.h"
57 : #include "catalog/pg_range.h"
58 : #include "catalog/pg_type.h"
59 : #include "commands/defrem.h"
60 : #include "common/int.h"
61 : #include "executor/executor.h"
62 : #include "lib/dshash.h"
63 : #include "optimizer/optimizer.h"
64 : #include "port/pg_bitutils.h"
65 : #include "storage/lwlock.h"
66 : #include "utils/builtins.h"
67 : #include "utils/catcache.h"
68 : #include "utils/fmgroids.h"
69 : #include "utils/injection_point.h"
70 : #include "utils/inval.h"
71 : #include "utils/lsyscache.h"
72 : #include "utils/memutils.h"
73 : #include "utils/rel.h"
74 : #include "utils/syscache.h"
75 : #include "utils/typcache.h"
76 :
77 :
78 : /* The main type cache hashtable searched by lookup_type_cache */
79 : static HTAB *TypeCacheHash = NULL;
80 :
81 : /*
82 : * The mapping of relation's OID to the corresponding composite type OID.
 * We're keeping the map entry when the corresponding typentry has something
 * to clear, i.e., it has either TCFLAGS_HAVE_PG_TYPE_DATA, or
85 : * TCFLAGS_OPERATOR_FLAGS, or tupdesc.
86 : */
87 : static HTAB *RelIdToTypeIdCacheHash = NULL;
88 :
89 : typedef struct RelIdToTypeIdCacheEntry
90 : {
91 : Oid relid; /* OID of the relation */
92 : Oid composite_typid; /* OID of the relation's composite type */
93 : } RelIdToTypeIdCacheEntry;
94 :
95 : /* List of type cache entries for domain types */
96 : static TypeCacheEntry *firstDomainTypeEntry = NULL;
97 :
98 : /* Private flag bits in the TypeCacheEntry.flags field */
99 : #define TCFLAGS_HAVE_PG_TYPE_DATA 0x000001
100 : #define TCFLAGS_CHECKED_BTREE_OPCLASS 0x000002
101 : #define TCFLAGS_CHECKED_HASH_OPCLASS 0x000004
102 : #define TCFLAGS_CHECKED_EQ_OPR 0x000008
103 : #define TCFLAGS_CHECKED_LT_OPR 0x000010
104 : #define TCFLAGS_CHECKED_GT_OPR 0x000020
105 : #define TCFLAGS_CHECKED_CMP_PROC 0x000040
106 : #define TCFLAGS_CHECKED_HASH_PROC 0x000080
107 : #define TCFLAGS_CHECKED_HASH_EXTENDED_PROC 0x000100
108 : #define TCFLAGS_CHECKED_ELEM_PROPERTIES 0x000200
109 : #define TCFLAGS_HAVE_ELEM_EQUALITY 0x000400
110 : #define TCFLAGS_HAVE_ELEM_COMPARE 0x000800
111 : #define TCFLAGS_HAVE_ELEM_HASHING 0x001000
112 : #define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING 0x002000
113 : #define TCFLAGS_CHECKED_FIELD_PROPERTIES 0x004000
114 : #define TCFLAGS_HAVE_FIELD_EQUALITY 0x008000
115 : #define TCFLAGS_HAVE_FIELD_COMPARE 0x010000
116 : #define TCFLAGS_HAVE_FIELD_HASHING 0x020000
117 : #define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING 0x040000
118 : #define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS 0x080000
119 : #define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE 0x100000
120 :
121 : /* The flags associated with equality/comparison/hashing are all but these: */
122 : #define TCFLAGS_OPERATOR_FLAGS \
123 : (~(TCFLAGS_HAVE_PG_TYPE_DATA | \
124 : TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
125 : TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))
126 :
127 : /*
128 : * Data stored about a domain type's constraints. Note that we do not create
129 : * this struct for the common case of a constraint-less domain; we just set
130 : * domainData to NULL to indicate that.
131 : *
132 : * Within a DomainConstraintCache, we store expression plan trees, but the
133 : * check_exprstate fields of the DomainConstraintState nodes are just NULL.
134 : * When needed, expression evaluation nodes are built by flat-copying the
135 : * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
136 : * Such a node tree is not part of the DomainConstraintCache, but is
137 : * considered to belong to a DomainConstraintRef.
138 : */
139 : struct DomainConstraintCache
140 : {
141 : List *constraints; /* list of DomainConstraintState nodes */
142 : MemoryContext dccContext; /* memory context holding all associated data */
143 : long dccRefCount; /* number of references to this struct */
144 : };
145 :
146 : /* Private information to support comparisons of enum values */
147 : typedef struct
148 : {
149 : Oid enum_oid; /* OID of one enum value */
150 : float4 sort_order; /* its sort position */
151 : } EnumItem;
152 :
153 : typedef struct TypeCacheEnumData
154 : {
155 : Oid bitmap_base; /* OID corresponding to bit 0 of bitmapset */
156 : Bitmapset *sorted_values; /* Set of OIDs known to be in order */
157 : int num_values; /* total number of values in enum */
158 : EnumItem enum_values[FLEXIBLE_ARRAY_MEMBER];
159 : } TypeCacheEnumData;
160 :
161 : /*
162 : * We use a separate table for storing the definitions of non-anonymous
163 : * record types. Once defined, a record type will be remembered for the
164 : * life of the backend. Subsequent uses of the "same" record type (where
165 : * sameness means equalRowTypes) will refer to the existing table entry.
166 : *
167 : * Stored record types are remembered in a linear array of TupleDescs,
168 : * which can be indexed quickly with the assigned typmod. There is also
169 : * a hash table to speed searches for matching TupleDescs.
170 : */
171 :
typedef struct RecordCacheEntry
{
	TupleDesc	tupdesc;		/* hash key and payload: the registered
								 * record type's tuple descriptor */
} RecordCacheEntry;
176 :
177 : /*
178 : * To deal with non-anonymous record types that are exchanged by backends
179 : * involved in a parallel query, we also need a shared version of the above.
180 : */
181 : struct SharedRecordTypmodRegistry
182 : {
183 : /* A hash table for finding a matching TupleDesc. */
184 : dshash_table_handle record_table_handle;
185 : /* A hash table for finding a TupleDesc by typmod. */
186 : dshash_table_handle typmod_table_handle;
187 : /* A source of new record typmod numbers. */
188 : pg_atomic_uint32 next_typmod;
189 : };
190 :
191 : /*
192 : * When using shared tuple descriptors as hash table keys we need a way to be
193 : * able to search for an equal shared TupleDesc using a backend-local
194 : * TupleDesc. So we use this type which can hold either, and hash and compare
195 : * functions that know how to handle both.
196 : */
197 : typedef struct SharedRecordTableKey
198 : {
199 : union
200 : {
201 : TupleDesc local_tupdesc;
202 : dsa_pointer shared_tupdesc;
203 : } u;
204 : bool shared;
205 : } SharedRecordTableKey;
206 :
207 : /*
208 : * The shared version of RecordCacheEntry. This lets us look up a typmod
209 : * using a TupleDesc which may be in local or shared memory.
210 : */
211 : typedef struct SharedRecordTableEntry
212 : {
213 : SharedRecordTableKey key;
214 : } SharedRecordTableEntry;
215 :
216 : /*
217 : * An entry in SharedRecordTypmodRegistry's typmod table. This lets us look
218 : * up a TupleDesc in shared memory using a typmod.
219 : */
220 : typedef struct SharedTypmodTableEntry
221 : {
222 : uint32 typmod;
223 : dsa_pointer shared_tupdesc;
224 : } SharedTypmodTableEntry;
225 :
/*
 * Stack of type OIDs whose typcache entries are currently being filled by
 * lookup_type_cache().  NOTE(review): presumably this lets invalidation
 * callbacks detect in-flight lookups that a concurrent sinval message may
 * have clobbered — confirm against the callback code (not visible here).
 */
static Oid *in_progress_list;	/* array of in-progress type OIDs */
static int	in_progress_list_len;	/* number of entries currently in use */
static int	in_progress_list_maxlen;	/* allocated length of the array */
229 :
230 : /*
231 : * A comparator function for SharedRecordTableKey.
232 : */
233 : static int
234 99 : shared_record_table_compare(const void *a, const void *b, size_t size,
235 : void *arg)
236 : {
237 99 : dsa_area *area = (dsa_area *) arg;
238 99 : const SharedRecordTableKey *k1 = a;
239 99 : const SharedRecordTableKey *k2 = b;
240 : TupleDesc t1;
241 : TupleDesc t2;
242 :
243 99 : if (k1->shared)
244 0 : t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
245 : else
246 99 : t1 = k1->u.local_tupdesc;
247 :
248 99 : if (k2->shared)
249 99 : t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
250 : else
251 0 : t2 = k2->u.local_tupdesc;
252 :
253 99 : return equalRowTypes(t1, t2) ? 0 : 1;
254 : }
255 :
256 : /*
257 : * A hash function for SharedRecordTableKey.
258 : */
259 : static uint32
260 224 : shared_record_table_hash(const void *a, size_t size, void *arg)
261 : {
262 224 : dsa_area *area = arg;
263 224 : const SharedRecordTableKey *k = a;
264 : TupleDesc t;
265 :
266 224 : if (k->shared)
267 0 : t = (TupleDesc) dsa_get_address(area, k->u.shared_tupdesc);
268 : else
269 224 : t = k->u.local_tupdesc;
270 :
271 224 : return hashRowType(t);
272 : }
273 :
274 : /* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
275 : static const dshash_parameters srtr_record_table_params = {
276 : sizeof(SharedRecordTableKey), /* unused */
277 : sizeof(SharedRecordTableEntry),
278 : shared_record_table_compare,
279 : shared_record_table_hash,
280 : dshash_memcpy,
281 : LWTRANCHE_PER_SESSION_RECORD_TYPE
282 : };
283 :
284 : /* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
285 : static const dshash_parameters srtr_typmod_table_params = {
286 : sizeof(uint32),
287 : sizeof(SharedTypmodTableEntry),
288 : dshash_memcmp,
289 : dshash_memhash,
290 : dshash_memcpy,
291 : LWTRANCHE_PER_SESSION_RECORD_TYPMOD
292 : };
293 :
294 : /* hashtable for recognizing registered record types */
295 : static HTAB *RecordCacheHash = NULL;
296 :
297 : typedef struct RecordCacheArrayEntry
298 : {
299 : uint64 id;
300 : TupleDesc tupdesc;
301 : } RecordCacheArrayEntry;
302 :
303 : /* array of info about registered record types, indexed by assigned typmod */
304 : static RecordCacheArrayEntry *RecordCacheArray = NULL;
305 : static int32 RecordCacheArrayLen = 0; /* allocated length of above array */
306 : static int32 NextRecordTypmod = 0; /* number of entries used */
307 :
308 : /*
309 : * Process-wide counter for generating unique tupledesc identifiers.
310 : * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
311 : * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
312 : */
313 : static uint64 tupledesc_id_counter = INVALID_TUPLEDESC_IDENTIFIER;
314 :
315 : static void load_typcache_tupdesc(TypeCacheEntry *typentry);
316 : static void load_rangetype_info(TypeCacheEntry *typentry);
317 : static void load_multirangetype_info(TypeCacheEntry *typentry);
318 : static void load_domaintype_info(TypeCacheEntry *typentry);
319 : static int dcs_cmp(const void *a, const void *b);
320 : static void decr_dcc_refcount(DomainConstraintCache *dcc);
321 : static void dccref_deletion_callback(void *arg);
322 : static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
323 : static bool array_element_has_equality(TypeCacheEntry *typentry);
324 : static bool array_element_has_compare(TypeCacheEntry *typentry);
325 : static bool array_element_has_hashing(TypeCacheEntry *typentry);
326 : static bool array_element_has_extended_hashing(TypeCacheEntry *typentry);
327 : static void cache_array_element_properties(TypeCacheEntry *typentry);
328 : static bool record_fields_have_equality(TypeCacheEntry *typentry);
329 : static bool record_fields_have_compare(TypeCacheEntry *typentry);
330 : static bool record_fields_have_hashing(TypeCacheEntry *typentry);
331 : static bool record_fields_have_extended_hashing(TypeCacheEntry *typentry);
332 : static void cache_record_field_properties(TypeCacheEntry *typentry);
333 : static bool range_element_has_hashing(TypeCacheEntry *typentry);
334 : static bool range_element_has_extended_hashing(TypeCacheEntry *typentry);
335 : static void cache_range_element_properties(TypeCacheEntry *typentry);
336 : static bool multirange_element_has_hashing(TypeCacheEntry *typentry);
337 : static bool multirange_element_has_extended_hashing(TypeCacheEntry *typentry);
338 : static void cache_multirange_element_properties(TypeCacheEntry *typentry);
339 : static void TypeCacheRelCallback(Datum arg, Oid relid);
340 : static void TypeCacheTypCallback(Datum arg, SysCacheIdentifier cacheid,
341 : uint32 hashvalue);
342 : static void TypeCacheOpcCallback(Datum arg, SysCacheIdentifier cacheid,
343 : uint32 hashvalue);
344 : static void TypeCacheConstrCallback(Datum arg, SysCacheIdentifier cacheid,
345 : uint32 hashvalue);
346 : static void load_enum_cache_data(TypeCacheEntry *tcache);
347 : static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
348 : static int enum_oid_cmp(const void *left, const void *right);
349 : static void shared_record_typmod_registry_detach(dsm_segment *segment,
350 : Datum datum);
351 : static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc);
352 : static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
353 : uint32 typmod);
354 : static void insert_rel_type_cache_if_needed(TypeCacheEntry *typentry);
355 : static void delete_rel_type_cache_if_needed(TypeCacheEntry *typentry);
356 :
357 :
358 : /*
359 : * Hash function compatible with one-arg system cache hash function.
360 : */
361 : static uint32
362 453144 : type_cache_syshash(const void *key, Size keysize)
363 : {
364 : Assert(keysize == sizeof(Oid));
365 453144 : return GetSysCacheHashValue1(TYPEOID, ObjectIdGetDatum(*(const Oid *) key));
366 : }
367 :
368 : /*
369 : * lookup_type_cache
370 : *
371 : * Fetch the type cache entry for the specified datatype, and make sure that
372 : * all the fields requested by bits in 'flags' are valid.
373 : *
374 : * The result is never NULL --- we will ereport() if the passed type OID is
375 : * invalid. Note however that we may fail to find one or more of the
376 : * values requested by 'flags'; the caller needs to check whether the fields
377 : * are InvalidOid or not.
378 : *
379 : * Note that while filling TypeCacheEntry we might process concurrent
380 : * invalidation messages, causing our not-yet-filled TypeCacheEntry to be
381 : * invalidated. In this case, we typically only clear flags while values are
382 : * still available for the caller. It's expected that the caller holds
383 : * enough locks on type-depending objects that the values are still relevant.
384 : * It's also important that the tupdesc is filled after all other
385 : * TypeCacheEntry items for TYPTYPE_COMPOSITE. So, tupdesc can't get
386 : * invalidated during the lookup_type_cache() call.
387 : */
388 : TypeCacheEntry *
389 410279 : lookup_type_cache(Oid type_id, int flags)
390 : {
391 : TypeCacheEntry *typentry;
392 : bool found;
393 : int in_progress_offset;
394 :
395 410279 : if (TypeCacheHash == NULL)
396 : {
397 : /* First time through: initialize the hash table */
398 : HASHCTL ctl;
399 : int allocsize;
400 :
401 4165 : ctl.keysize = sizeof(Oid);
402 4165 : ctl.entrysize = sizeof(TypeCacheEntry);
403 :
404 : /*
405 : * TypeCacheEntry takes hash value from the system cache. For
406 : * TypeCacheHash we use the same hash in order to speedup search by
407 : * hash value. This is used by hash_seq_init_with_hash_value().
408 : */
409 4165 : ctl.hash = type_cache_syshash;
410 :
411 4165 : TypeCacheHash = hash_create("Type information cache", 64,
412 : &ctl, HASH_ELEM | HASH_FUNCTION);
413 :
414 : Assert(RelIdToTypeIdCacheHash == NULL);
415 :
416 4165 : ctl.keysize = sizeof(Oid);
417 4165 : ctl.entrysize = sizeof(RelIdToTypeIdCacheEntry);
418 4165 : RelIdToTypeIdCacheHash = hash_create("Map from relid to OID of cached composite type", 64,
419 : &ctl, HASH_ELEM | HASH_BLOBS);
420 :
421 : /* Also set up callbacks for SI invalidations */
422 4165 : CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
423 4165 : CacheRegisterSyscacheCallback(TYPEOID, TypeCacheTypCallback, (Datum) 0);
424 4165 : CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
425 4165 : CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);
426 :
427 : /* Also make sure CacheMemoryContext exists */
428 4165 : if (!CacheMemoryContext)
429 0 : CreateCacheMemoryContext();
430 :
431 : /*
432 : * reserve enough in_progress_list slots for many cases
433 : */
434 4165 : allocsize = 4;
435 4165 : in_progress_list =
436 4165 : MemoryContextAlloc(CacheMemoryContext,
437 : allocsize * sizeof(*in_progress_list));
438 4165 : in_progress_list_maxlen = allocsize;
439 : }
440 :
441 : Assert(TypeCacheHash != NULL && RelIdToTypeIdCacheHash != NULL);
442 :
443 : /* Register to catch invalidation messages */
444 410279 : if (in_progress_list_len >= in_progress_list_maxlen)
445 : {
446 : int allocsize;
447 :
448 0 : allocsize = in_progress_list_maxlen * 2;
449 0 : in_progress_list = repalloc(in_progress_list,
450 : allocsize * sizeof(*in_progress_list));
451 0 : in_progress_list_maxlen = allocsize;
452 : }
453 410279 : in_progress_offset = in_progress_list_len++;
454 410279 : in_progress_list[in_progress_offset] = type_id;
455 :
456 : /* Try to look up an existing entry */
457 410279 : typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
458 : &type_id,
459 : HASH_FIND, NULL);
460 410279 : if (typentry == NULL)
461 : {
462 : /*
463 : * If we didn't find one, we want to make one. But first look up the
464 : * pg_type row, just to make sure we don't make a cache entry for an
465 : * invalid type OID. If the type OID is not valid, present a
466 : * user-facing error, since some code paths such as domain_in() allow
467 : * this function to be reached with a user-supplied OID.
468 : */
469 : HeapTuple tp;
470 : Form_pg_type typtup;
471 :
472 18542 : tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
473 18542 : if (!HeapTupleIsValid(tp))
474 0 : ereport(ERROR,
475 : (errcode(ERRCODE_UNDEFINED_OBJECT),
476 : errmsg("type with OID %u does not exist", type_id)));
477 18542 : typtup = (Form_pg_type) GETSTRUCT(tp);
478 18542 : if (!typtup->typisdefined)
479 0 : ereport(ERROR,
480 : (errcode(ERRCODE_UNDEFINED_OBJECT),
481 : errmsg("type \"%s\" is only a shell",
482 : NameStr(typtup->typname))));
483 :
484 : /* Now make the typcache entry */
485 18542 : typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
486 : &type_id,
487 : HASH_ENTER, &found);
488 : Assert(!found); /* it wasn't there a moment ago */
489 :
490 1168146 : MemSet(typentry, 0, sizeof(TypeCacheEntry));
491 :
492 : /* These fields can never change, by definition */
493 18542 : typentry->type_id = type_id;
494 18542 : typentry->type_id_hash = get_hash_value(TypeCacheHash, &type_id);
495 :
496 : /* Keep this part in sync with the code below */
497 18542 : typentry->typlen = typtup->typlen;
498 18542 : typentry->typbyval = typtup->typbyval;
499 18542 : typentry->typalign = typtup->typalign;
500 18542 : typentry->typstorage = typtup->typstorage;
501 18542 : typentry->typtype = typtup->typtype;
502 18542 : typentry->typrelid = typtup->typrelid;
503 18542 : typentry->typsubscript = typtup->typsubscript;
504 18542 : typentry->typelem = typtup->typelem;
505 18542 : typentry->typarray = typtup->typarray;
506 18542 : typentry->typcollation = typtup->typcollation;
507 18542 : typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
508 :
509 : /* If it's a domain, immediately thread it into the domain cache list */
510 18542 : if (typentry->typtype == TYPTYPE_DOMAIN)
511 : {
512 818 : typentry->nextDomain = firstDomainTypeEntry;
513 818 : firstDomainTypeEntry = typentry;
514 : }
515 :
516 18542 : ReleaseSysCache(tp);
517 : }
518 391737 : else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
519 : {
520 : /*
521 : * We have an entry, but its pg_type row got changed, so reload the
522 : * data obtained directly from pg_type.
523 : */
524 : HeapTuple tp;
525 : Form_pg_type typtup;
526 :
527 289 : tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
528 289 : if (!HeapTupleIsValid(tp))
529 0 : ereport(ERROR,
530 : (errcode(ERRCODE_UNDEFINED_OBJECT),
531 : errmsg("type with OID %u does not exist", type_id)));
532 289 : typtup = (Form_pg_type) GETSTRUCT(tp);
533 289 : if (!typtup->typisdefined)
534 0 : ereport(ERROR,
535 : (errcode(ERRCODE_UNDEFINED_OBJECT),
536 : errmsg("type \"%s\" is only a shell",
537 : NameStr(typtup->typname))));
538 :
539 : /*
540 : * Keep this part in sync with the code above. Many of these fields
541 : * shouldn't ever change, particularly typtype, but copy 'em anyway.
542 : */
543 289 : typentry->typlen = typtup->typlen;
544 289 : typentry->typbyval = typtup->typbyval;
545 289 : typentry->typalign = typtup->typalign;
546 289 : typentry->typstorage = typtup->typstorage;
547 289 : typentry->typtype = typtup->typtype;
548 289 : typentry->typrelid = typtup->typrelid;
549 289 : typentry->typsubscript = typtup->typsubscript;
550 289 : typentry->typelem = typtup->typelem;
551 289 : typentry->typarray = typtup->typarray;
552 289 : typentry->typcollation = typtup->typcollation;
553 289 : typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
554 :
555 289 : ReleaseSysCache(tp);
556 : }
557 :
558 : /*
559 : * Look up opclasses if we haven't already and any dependent info is
560 : * requested.
561 : */
562 410279 : if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR | TYPECACHE_GT_OPR |
563 : TYPECACHE_CMP_PROC |
564 : TYPECACHE_EQ_OPR_FINFO | TYPECACHE_CMP_PROC_FINFO |
565 272556 : TYPECACHE_BTREE_OPFAMILY)) &&
566 272556 : !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
567 : {
568 : Oid opclass;
569 :
570 16261 : opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
571 16261 : if (OidIsValid(opclass))
572 : {
573 15704 : typentry->btree_opf = get_opclass_family(opclass);
574 15704 : typentry->btree_opintype = get_opclass_input_type(opclass);
575 : }
576 : else
577 : {
578 557 : typentry->btree_opf = typentry->btree_opintype = InvalidOid;
579 : }
580 :
581 : /*
582 : * Reset information derived from btree opclass. Note in particular
583 : * that we'll redetermine the eq_opr even if we previously found one;
584 : * this matters in case a btree opclass has been added to a type that
585 : * previously had only a hash opclass.
586 : */
587 16261 : typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
588 : TCFLAGS_CHECKED_LT_OPR |
589 : TCFLAGS_CHECKED_GT_OPR |
590 : TCFLAGS_CHECKED_CMP_PROC);
591 16261 : typentry->flags |= TCFLAGS_CHECKED_BTREE_OPCLASS;
592 : }
593 :
594 : /*
595 : * If we need to look up equality operator, and there's no btree opclass,
596 : * force lookup of hash opclass.
597 : */
598 410279 : if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
599 256840 : !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
600 16120 : typentry->btree_opf == InvalidOid)
601 551 : flags |= TYPECACHE_HASH_OPFAMILY;
602 :
603 410279 : if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO |
604 : TYPECACHE_HASH_EXTENDED_PROC |
605 : TYPECACHE_HASH_EXTENDED_PROC_FINFO |
606 179852 : TYPECACHE_HASH_OPFAMILY)) &&
607 179852 : !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
608 : {
609 : Oid opclass;
610 :
611 12310 : opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
612 12310 : if (OidIsValid(opclass))
613 : {
614 12186 : typentry->hash_opf = get_opclass_family(opclass);
615 12186 : typentry->hash_opintype = get_opclass_input_type(opclass);
616 : }
617 : else
618 : {
619 124 : typentry->hash_opf = typentry->hash_opintype = InvalidOid;
620 : }
621 :
622 : /*
623 : * Reset information derived from hash opclass. We do *not* reset the
624 : * eq_opr; if we already found one from the btree opclass, that
625 : * decision is still good.
626 : */
627 12310 : typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
628 : TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
629 12310 : typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
630 : }
631 :
632 : /*
633 : * Look for requested operators and functions, if we haven't already.
634 : */
635 410279 : if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
636 256840 : !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
637 : {
638 16120 : Oid eq_opr = InvalidOid;
639 :
640 16120 : if (typentry->btree_opf != InvalidOid)
641 15569 : eq_opr = get_opfamily_member(typentry->btree_opf,
642 : typentry->btree_opintype,
643 : typentry->btree_opintype,
644 : BTEqualStrategyNumber);
645 16120 : if (eq_opr == InvalidOid &&
646 551 : typentry->hash_opf != InvalidOid)
647 459 : eq_opr = get_opfamily_member(typentry->hash_opf,
648 : typentry->hash_opintype,
649 : typentry->hash_opintype,
650 : HTEqualStrategyNumber);
651 :
652 : /*
653 : * If the proposed equality operator is array_eq or record_eq, check
654 : * to see if the element type or column types support equality. If
655 : * not, array_eq or record_eq would fail at runtime, so we don't want
656 : * to report that the type has equality. (We can omit similar
657 : * checking for ranges and multiranges because ranges can't be created
658 : * in the first place unless their subtypes support equality.)
659 : */
660 16120 : if (eq_opr == ARRAY_EQ_OP &&
661 1678 : !array_element_has_equality(typentry))
662 264 : eq_opr = InvalidOid;
663 15856 : else if (eq_opr == RECORD_EQ_OP &&
664 262 : !record_fields_have_equality(typentry))
665 137 : eq_opr = InvalidOid;
666 :
667 : /* Force update of eq_opr_finfo only if we're changing state */
668 16120 : if (typentry->eq_opr != eq_opr)
669 14915 : typentry->eq_opr_finfo.fn_oid = InvalidOid;
670 :
671 16120 : typentry->eq_opr = eq_opr;
672 :
673 : /*
674 : * Reset info about hash functions whenever we pick up new info about
675 : * equality operator. This is so we can ensure that the hash
676 : * functions match the operator.
677 : */
678 16120 : typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
679 : TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
680 16120 : typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
681 : }
682 410279 : if ((flags & TYPECACHE_LT_OPR) &&
683 154338 : !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
684 : {
685 9969 : Oid lt_opr = InvalidOid;
686 :
687 9969 : if (typentry->btree_opf != InvalidOid)
688 9765 : lt_opr = get_opfamily_member(typentry->btree_opf,
689 : typentry->btree_opintype,
690 : typentry->btree_opintype,
691 : BTLessStrategyNumber);
692 :
693 : /*
694 : * As above, make sure array_cmp or record_cmp will succeed; but again
695 : * we need no special check for ranges or multiranges.
696 : */
697 9969 : if (lt_opr == ARRAY_LT_OP &&
698 1200 : !array_element_has_compare(typentry))
699 285 : lt_opr = InvalidOid;
700 9684 : else if (lt_opr == RECORD_LT_OP &&
701 69 : !record_fields_have_compare(typentry))
702 6 : lt_opr = InvalidOid;
703 :
704 9969 : typentry->lt_opr = lt_opr;
705 9969 : typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
706 : }
707 410279 : if ((flags & TYPECACHE_GT_OPR) &&
708 149402 : !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
709 : {
710 9887 : Oid gt_opr = InvalidOid;
711 :
712 9887 : if (typentry->btree_opf != InvalidOid)
713 9690 : gt_opr = get_opfamily_member(typentry->btree_opf,
714 : typentry->btree_opintype,
715 : typentry->btree_opintype,
716 : BTGreaterStrategyNumber);
717 :
718 : /*
719 : * As above, make sure array_cmp or record_cmp will succeed; but again
720 : * we need no special check for ranges or multiranges.
721 : */
722 9887 : if (gt_opr == ARRAY_GT_OP &&
723 1195 : !array_element_has_compare(typentry))
724 285 : gt_opr = InvalidOid;
725 9602 : else if (gt_opr == RECORD_GT_OP &&
726 69 : !record_fields_have_compare(typentry))
727 6 : gt_opr = InvalidOid;
728 :
729 9887 : typentry->gt_opr = gt_opr;
730 9887 : typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
731 : }
732 410279 : if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
733 15094 : !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
734 : {
735 2573 : Oid cmp_proc = InvalidOid;
736 :
737 2573 : if (typentry->btree_opf != InvalidOid)
738 2465 : cmp_proc = get_opfamily_proc(typentry->btree_opf,
739 : typentry->btree_opintype,
740 : typentry->btree_opintype,
741 : BTORDER_PROC);
742 :
743 : /*
744 : * As above, make sure array_cmp or record_cmp will succeed; but again
745 : * we need no special check for ranges or multiranges.
746 : */
747 2573 : if (cmp_proc == F_BTARRAYCMP &&
748 550 : !array_element_has_compare(typentry))
749 128 : cmp_proc = InvalidOid;
750 2445 : else if (cmp_proc == F_BTRECORDCMP &&
751 165 : !record_fields_have_compare(typentry))
752 128 : cmp_proc = InvalidOid;
753 :
754 : /* Force update of cmp_proc_finfo only if we're changing state */
755 2573 : if (typentry->cmp_proc != cmp_proc)
756 2178 : typentry->cmp_proc_finfo.fn_oid = InvalidOid;
757 :
758 2573 : typentry->cmp_proc = cmp_proc;
759 2573 : typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
760 : }
761 410279 : if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) &&
762 179402 : !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
763 : {
764 12181 : Oid hash_proc = InvalidOid;
765 :
766 : /*
767 : * We insist that the eq_opr, if one has been determined, match the
768 : * hash opclass; else report there is no hash function.
769 : */
770 12181 : if (typentry->hash_opf != InvalidOid &&
771 23561 : (!OidIsValid(typentry->eq_opr) ||
772 11466 : typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
773 : typentry->hash_opintype,
774 : typentry->hash_opintype,
775 : HTEqualStrategyNumber)))
776 12095 : hash_proc = get_opfamily_proc(typentry->hash_opf,
777 : typentry->hash_opintype,
778 : typentry->hash_opintype,
779 : HASHSTANDARD_PROC);
780 :
781 : /*
782 : * As above, make sure hash_array, hash_record, or hash_range will
783 : * succeed.
784 : */
785 12181 : if (hash_proc == F_HASH_ARRAY &&
786 1217 : !array_element_has_hashing(typentry))
787 173 : hash_proc = InvalidOid;
788 12008 : else if (hash_proc == F_HASH_RECORD &&
789 251 : !record_fields_have_hashing(typentry))
790 156 : hash_proc = InvalidOid;
791 11852 : else if (hash_proc == F_HASH_RANGE &&
792 60 : !range_element_has_hashing(typentry))
793 3 : hash_proc = InvalidOid;
794 :
795 : /*
796 : * Likewise for hash_multirange.
797 : */
798 12181 : if (hash_proc == F_HASH_MULTIRANGE &&
799 9 : !multirange_element_has_hashing(typentry))
800 3 : hash_proc = InvalidOid;
801 :
802 : /* Force update of hash_proc_finfo only if we're changing state */
803 12181 : if (typentry->hash_proc != hash_proc)
804 10883 : typentry->hash_proc_finfo.fn_oid = InvalidOid;
805 :
806 12181 : typentry->hash_proc = hash_proc;
807 12181 : typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
808 : }
809 410279 : if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
810 5453 : TYPECACHE_HASH_EXTENDED_PROC_FINFO)) &&
811 5453 : !(typentry->flags & TCFLAGS_CHECKED_HASH_EXTENDED_PROC))
812 : {
813 2181 : Oid hash_extended_proc = InvalidOid;
814 :
815 : /*
816 : * We insist that the eq_opr, if one has been determined, match the
817 : * hash opclass; else report there is no hash function.
818 : */
819 2181 : if (typentry->hash_opf != InvalidOid &&
820 4004 : (!OidIsValid(typentry->eq_opr) ||
821 1842 : typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
822 : typentry->hash_opintype,
823 : typentry->hash_opintype,
824 : HTEqualStrategyNumber)))
825 2162 : hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
826 : typentry->hash_opintype,
827 : typentry->hash_opintype,
828 : HASHEXTENDED_PROC);
829 :
830 : /*
831 : * As above, make sure hash_array_extended, hash_record_extended, or
832 : * hash_range_extended will succeed.
833 : */
834 2181 : if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
835 262 : !array_element_has_extended_hashing(typentry))
836 128 : hash_extended_proc = InvalidOid;
837 2053 : else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
838 135 : !record_fields_have_extended_hashing(typentry))
839 131 : hash_extended_proc = InvalidOid;
840 1922 : else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
841 0 : !range_element_has_extended_hashing(typentry))
842 0 : hash_extended_proc = InvalidOid;
843 :
844 : /*
845 : * Likewise for hash_multirange_extended.
846 : */
847 2181 : if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
848 0 : !multirange_element_has_extended_hashing(typentry))
849 0 : hash_extended_proc = InvalidOid;
850 :
851 : /* Force update of proc finfo only if we're changing state */
852 2181 : if (typentry->hash_extended_proc != hash_extended_proc)
853 1887 : typentry->hash_extended_proc_finfo.fn_oid = InvalidOid;
854 :
855 2181 : typentry->hash_extended_proc = hash_extended_proc;
856 2181 : typentry->flags |= TCFLAGS_CHECKED_HASH_EXTENDED_PROC;
857 : }
858 :
859 : /*
860 : * Set up fmgr lookup info as requested
861 : *
862 : * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
863 : * which is not quite right (they're really in the hash table's private
864 : * memory context) but this will do for our purposes.
865 : *
866 : * Note: the code above avoids invalidating the finfo structs unless the
867 : * referenced operator/function OID actually changes. This is to prevent
868 : * unnecessary leakage of any subsidiary data attached to an finfo, since
869 : * that would cause session-lifespan memory leaks.
870 : */
871 410279 : if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
872 3059 : typentry->eq_opr_finfo.fn_oid == InvalidOid &&
873 958 : typentry->eq_opr != InvalidOid)
874 : {
875 : Oid eq_opr_func;
876 :
877 955 : eq_opr_func = get_opcode(typentry->eq_opr);
878 955 : if (eq_opr_func != InvalidOid)
879 955 : fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
880 : CacheMemoryContext);
881 : }
882 410279 : if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
883 8161 : typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
884 2204 : typentry->cmp_proc != InvalidOid)
885 : {
886 850 : fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
887 : CacheMemoryContext);
888 : }
889 410279 : if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
890 4248 : typentry->hash_proc_finfo.fn_oid == InvalidOid &&
891 840 : typentry->hash_proc != InvalidOid)
892 : {
893 736 : fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
894 : CacheMemoryContext);
895 : }
896 410279 : if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) &&
897 57 : typentry->hash_extended_proc_finfo.fn_oid == InvalidOid &&
898 18 : typentry->hash_extended_proc != InvalidOid)
899 : {
900 12 : fmgr_info_cxt(typentry->hash_extended_proc,
901 : &typentry->hash_extended_proc_finfo,
902 : CacheMemoryContext);
903 : }
904 :
905 : /*
906 : * If it's a composite type (row type), get tupdesc if requested
907 : */
908 410279 : if ((flags & TYPECACHE_TUPDESC) &&
909 50342 : typentry->tupDesc == NULL &&
910 2051 : typentry->typtype == TYPTYPE_COMPOSITE)
911 : {
912 1986 : load_typcache_tupdesc(typentry);
913 : }
914 :
915 : /*
916 : * If requested, get information about a range type
917 : *
918 : * This includes making sure that the basic info about the range element
919 : * type is up-to-date.
920 : */
921 410279 : if ((flags & TYPECACHE_RANGE_INFO) &&
922 15363 : typentry->typtype == TYPTYPE_RANGE)
923 : {
924 15363 : if (typentry->rngelemtype == NULL)
925 384 : load_rangetype_info(typentry);
926 14979 : else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
927 3 : (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
928 : }
929 :
930 : /*
931 : * If requested, get information about a multirange type
932 : */
933 410279 : if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
934 6424 : typentry->rngtype == NULL &&
935 105 : typentry->typtype == TYPTYPE_MULTIRANGE)
936 : {
937 105 : load_multirangetype_info(typentry);
938 : }
939 :
940 : /*
941 : * If requested, get information about a domain type
942 : */
943 410279 : if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
944 4837 : typentry->domainBaseType == InvalidOid &&
945 3350 : typentry->typtype == TYPTYPE_DOMAIN)
946 : {
947 251 : typentry->domainBaseTypmod = -1;
948 251 : typentry->domainBaseType =
949 251 : getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
950 : }
951 410279 : if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
952 21915 : (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
953 2548 : typentry->typtype == TYPTYPE_DOMAIN)
954 : {
955 1342 : load_domaintype_info(typentry);
956 : }
957 :
958 410279 : INJECTION_POINT("typecache-before-rel-type-cache-insert", NULL);
959 :
960 : Assert(in_progress_offset + 1 == in_progress_list_len);
961 410278 : in_progress_list_len--;
962 :
963 410278 : insert_rel_type_cache_if_needed(typentry);
964 :
965 410278 : return typentry;
966 : }
967 :
/*
 * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
 *
 * On success, typentry->tupDesc points at the relation's tuple descriptor
 * (with an extra refcount held on it) and typentry->tupDesc_identifier has
 * been advanced to a fresh value.
 */
static void
load_typcache_tupdesc(TypeCacheEntry *typentry)
{
	Relation	rel;

	/* A composite type must have an associated relation */
	if (!OidIsValid(typentry->typrelid))	/* should not happen */
		elog(ERROR, "invalid typrelid for composite type %u",
			 typentry->type_id);
	rel = relation_open(typentry->typrelid, AccessShareLock);
	Assert(rel->rd_rel->reltype == typentry->type_id);

	/*
	 * Link to the tupdesc and increment its refcount (we assert it's a
	 * refcounted descriptor).  We don't use IncrTupleDescRefCount() for this,
	 * because the reference mustn't be entered in the current resource owner;
	 * it can outlive the current query.
	 */
	typentry->tupDesc = RelationGetDescr(rel);

	Assert(typentry->tupDesc->tdrefcount > 0);
	typentry->tupDesc->tdrefcount++;

	/*
	 * In future, we could take some pains to not change tupDesc_identifier if
	 * the tupdesc didn't really change; but for now it's not worth it.
	 */
	typentry->tupDesc_identifier = ++tupledesc_id_counter;

	relation_close(rel, AccessShareLock);
}
1001 :
/*
 * load_rangetype_info --- helper routine to set up range type information
 *
 * Fills the rng_* fields of the typcache entry from pg_range and the range's
 * subtype opclass.  Setting rngelemtype is done last, since a non-NULL value
 * there is what marks the range data as valid.
 */
static void
load_rangetype_info(TypeCacheEntry *typentry)
{
	Form_pg_range pg_range;
	HeapTuple	tup;
	Oid			subtypeOid;
	Oid			opclassOid;
	Oid			canonicalOid;
	Oid			subdiffOid;
	Oid			opfamilyOid;
	Oid			opcintype;
	Oid			cmpFnOid;

	/* get information from pg_range */
	tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id));
	/* should not fail, since we already checked typtype ... */
	if (!HeapTupleIsValid(tup))
		elog(ERROR, "cache lookup failed for range type %u",
			 typentry->type_id);
	pg_range = (Form_pg_range) GETSTRUCT(tup);

	subtypeOid = pg_range->rngsubtype;
	typentry->rng_collation = pg_range->rngcollation;
	opclassOid = pg_range->rngsubopc;
	canonicalOid = pg_range->rngcanonical;
	subdiffOid = pg_range->rngsubdiff;

	ReleaseSysCache(tup);

	/* get opclass properties and look up the comparison function */
	opfamilyOid = get_opclass_family(opclassOid);
	opcintype = get_opclass_input_type(opclassOid);
	typentry->rng_opfamily = opfamilyOid;

	cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
								 BTORDER_PROC);
	if (!RegProcedureIsValid(cmpFnOid))
		elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
			 BTORDER_PROC, opcintype, opcintype, opfamilyOid);

	/* set up cached fmgrinfo structs */
	fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
				  CacheMemoryContext);
	/* canonical and subdiff functions are optional for a range type */
	if (OidIsValid(canonicalOid))
		fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
					  CacheMemoryContext);
	if (OidIsValid(subdiffOid))
		fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
					  CacheMemoryContext);

	/* Lastly, set up link to the element type --- this marks data valid */
	typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
}
1058 :
1059 : /*
1060 : * load_multirangetype_info --- helper routine to set up multirange type
1061 : * information
1062 : */
1063 : static void
1064 105 : load_multirangetype_info(TypeCacheEntry *typentry)
1065 : {
1066 : Oid rangetypeOid;
1067 :
1068 105 : rangetypeOid = get_multirange_range(typentry->type_id);
1069 105 : if (!OidIsValid(rangetypeOid))
1070 0 : elog(ERROR, "cache lookup failed for multirange type %u",
1071 : typentry->type_id);
1072 :
1073 105 : typentry->rngtype = lookup_type_cache(rangetypeOid, TYPECACHE_RANGE_INFO);
1074 105 : }
1075 :
/*
 * load_domaintype_info --- helper routine to set up domain constraint info
 *
 * Note: we assume we're called in a relatively short-lived context, so it's
 * okay to leak data into the current context while scanning pg_constraint.
 * We build the new DomainConstraintCache data in a context underneath
 * CurrentMemoryContext, and reparent it under CacheMemoryContext when
 * complete.
 */
static void
load_domaintype_info(TypeCacheEntry *typentry)
{
	Oid			typeOid = typentry->type_id;
	DomainConstraintCache *dcc;
	bool		notNull = false;
	DomainConstraintState **ccons;
	int			cconslen;
	Relation	conRel;
	MemoryContext oldcxt;

	/*
	 * If we're here, any existing constraint info is stale, so release it.
	 * For safety, be sure to null the link before trying to delete the data.
	 */
	if (typentry->domainData)
	{
		dcc = typentry->domainData;
		typentry->domainData = NULL;
		decr_dcc_refcount(dcc);
	}

	/*
	 * We try to optimize the common case of no domain constraints, so don't
	 * create the dcc object and context until we find a constraint.  Likewise
	 * for the temp sorting array.
	 */
	dcc = NULL;
	ccons = NULL;
	cconslen = 0;

	/*
	 * Scan pg_constraint for relevant constraints.  We want to find
	 * constraints for not just this domain, but any ancestor domains, so the
	 * outer loop crawls up the domain stack.
	 */
	conRel = table_open(ConstraintRelationId, AccessShareLock);

	for (;;)
	{
		HeapTuple	tup;
		HeapTuple	conTup;
		Form_pg_type typTup;
		int			nccons = 0;
		ScanKeyData key[1];
		SysScanDesc scan;

		tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
		if (!HeapTupleIsValid(tup))
			elog(ERROR, "cache lookup failed for type %u", typeOid);
		typTup = (Form_pg_type) GETSTRUCT(tup);

		if (typTup->typtype != TYPTYPE_DOMAIN)
		{
			/* Not a domain, so done */
			ReleaseSysCache(tup);
			break;
		}

		/*
		 * Test for NOT NULL Constraint; just remember it for now, since one
		 * check suffices no matter how many stack levels request it.
		 */
		if (typTup->typnotnull)
			notNull = true;

		/* Look for CHECK Constraints on this domain */
		ScanKeyInit(&key[0],
					Anum_pg_constraint_contypid,
					BTEqualStrategyNumber, F_OIDEQ,
					ObjectIdGetDatum(typeOid));

		scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
								  NULL, 1, key);

		while (HeapTupleIsValid(conTup = systable_getnext(scan)))
		{
			Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
			Datum		val;
			bool		isNull;
			char	   *constring;
			Expr	   *check_expr;
			DomainConstraintState *r;

			/* Ignore non-CHECK constraints */
			if (c->contype != CONSTRAINT_CHECK)
				continue;

			/* Not expecting conbin to be NULL, but we'll test for it anyway */
			val = fastgetattr(conTup, Anum_pg_constraint_conbin,
							  conRel->rd_att, &isNull);
			if (isNull)
				elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
					 NameStr(typTup->typname), NameStr(c->conname));

			/* Create the DomainConstraintCache object and context if needed */
			if (dcc == NULL)
			{
				MemoryContext cxt;

				cxt = AllocSetContextCreate(CurrentMemoryContext,
											"Domain constraints",
											ALLOCSET_SMALL_SIZES);
				dcc = (DomainConstraintCache *)
					MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
				dcc->constraints = NIL;
				dcc->dccContext = cxt;
				dcc->dccRefCount = 0;
			}

			/* Convert conbin to a node tree, still in caller's context */
			constring = TextDatumGetCString(val);
			check_expr = (Expr *) stringToNode(constring);

			/*
			 * Plan the expression, since ExecInitExpr will expect that.
			 *
			 * Note: caching the result of expression_planner() is not very
			 * good practice.  Ideally we'd use a CachedExpression here so
			 * that we would react promptly to, eg, changes in inlined
			 * functions.  However, because we don't support mutable domain
			 * CHECK constraints, it's not really clear that it's worth the
			 * extra overhead to do that.
			 */
			check_expr = expression_planner(check_expr);

			/* Create only the minimally needed stuff in dccContext */
			oldcxt = MemoryContextSwitchTo(dcc->dccContext);

			r = makeNode(DomainConstraintState);
			r->constrainttype = DOM_CONSTRAINT_CHECK;
			r->name = pstrdup(NameStr(c->conname));
			r->check_expr = copyObject(check_expr);
			r->check_exprstate = NULL;

			MemoryContextSwitchTo(oldcxt);

			/* Accumulate constraints in an array, for sorting below */
			if (ccons == NULL)
			{
				/* first constraint seen: allocate initial sorting array */
				cconslen = 8;
				ccons = (DomainConstraintState **)
					palloc(cconslen * sizeof(DomainConstraintState *));
			}
			else if (nccons >= cconslen)
			{
				/* grow the array geometrically as needed */
				cconslen *= 2;
				ccons = (DomainConstraintState **)
					repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
			}
			ccons[nccons++] = r;
		}

		systable_endscan(scan);

		if (nccons > 0)
		{
			/*
			 * Sort the items for this domain, so that CHECKs are applied in a
			 * deterministic order.
			 */
			if (nccons > 1)
				qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);

			/*
			 * Now attach them to the overall list.  Use lcons() here because
			 * constraints of parent domains should be applied earlier.
			 */
			oldcxt = MemoryContextSwitchTo(dcc->dccContext);
			while (nccons > 0)
				dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
			MemoryContextSwitchTo(oldcxt);
		}

		/* loop to next domain in stack */
		typeOid = typTup->typbasetype;
		ReleaseSysCache(tup);
	}

	table_close(conRel, AccessShareLock);

	/*
	 * Only need to add one NOT NULL check regardless of how many domains in
	 * the stack request it.
	 */
	if (notNull)
	{
		DomainConstraintState *r;

		/* Create the DomainConstraintCache object and context if needed */
		if (dcc == NULL)
		{
			MemoryContext cxt;

			cxt = AllocSetContextCreate(CurrentMemoryContext,
										"Domain constraints",
										ALLOCSET_SMALL_SIZES);
			dcc = (DomainConstraintCache *)
				MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
			dcc->constraints = NIL;
			dcc->dccContext = cxt;
			dcc->dccRefCount = 0;
		}

		/* Create node trees in DomainConstraintCache's context */
		oldcxt = MemoryContextSwitchTo(dcc->dccContext);

		r = makeNode(DomainConstraintState);

		r->constrainttype = DOM_CONSTRAINT_NOTNULL;
		r->name = pstrdup("NOT NULL");
		r->check_expr = NULL;
		r->check_exprstate = NULL;

		/* lcons to apply the nullness check FIRST */
		dcc->constraints = lcons(r, dcc->constraints);

		MemoryContextSwitchTo(oldcxt);
	}

	/*
	 * If we made a constraint object, move it into CacheMemoryContext and
	 * attach it to the typcache entry.
	 */
	if (dcc)
	{
		MemoryContextSetParent(dcc->dccContext, CacheMemoryContext);
		typentry->domainData = dcc;
		dcc->dccRefCount++;		/* count the typcache's reference */
	}

	/* Either way, the typcache entry's domain data is now valid. */
	typentry->flags |= TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
}
1316 :
1317 : /*
1318 : * qsort comparator to sort DomainConstraintState pointers by name
1319 : */
1320 : static int
1321 8 : dcs_cmp(const void *a, const void *b)
1322 : {
1323 8 : const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
1324 8 : const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
1325 :
1326 8 : return strcmp((*ca)->name, (*cb)->name);
1327 : }
1328 :
1329 : /*
1330 : * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
1331 : * and free it if no references remain
1332 : */
1333 : static void
1334 6511 : decr_dcc_refcount(DomainConstraintCache *dcc)
1335 : {
1336 : Assert(dcc->dccRefCount > 0);
1337 6511 : if (--(dcc->dccRefCount) <= 0)
1338 319 : MemoryContextDelete(dcc->dccContext);
1339 6511 : }
1340 :
1341 : /*
1342 : * Context reset/delete callback for a DomainConstraintRef
1343 : */
1344 : static void
1345 6546 : dccref_deletion_callback(void *arg)
1346 : {
1347 6546 : DomainConstraintRef *ref = (DomainConstraintRef *) arg;
1348 6546 : DomainConstraintCache *dcc = ref->dcc;
1349 :
1350 : /* Paranoia --- be sure link is nulled before trying to release */
1351 6546 : if (dcc)
1352 : {
1353 6190 : ref->constraints = NIL;
1354 6190 : ref->dcc = NULL;
1355 6190 : decr_dcc_refcount(dcc);
1356 : }
1357 6546 : }
1358 :
1359 : /*
1360 : * prep_domain_constraints --- prepare domain constraints for execution
1361 : *
1362 : * The expression trees stored in the DomainConstraintCache's list are
1363 : * converted to executable expression state trees stored in execctx.
1364 : */
1365 : static List *
1366 1300 : prep_domain_constraints(List *constraints, MemoryContext execctx)
1367 : {
1368 1300 : List *result = NIL;
1369 : MemoryContext oldcxt;
1370 : ListCell *lc;
1371 :
1372 1300 : oldcxt = MemoryContextSwitchTo(execctx);
1373 :
1374 2627 : foreach(lc, constraints)
1375 : {
1376 1327 : DomainConstraintState *r = (DomainConstraintState *) lfirst(lc);
1377 : DomainConstraintState *newr;
1378 :
1379 1327 : newr = makeNode(DomainConstraintState);
1380 1327 : newr->constrainttype = r->constrainttype;
1381 1327 : newr->name = r->name;
1382 1327 : newr->check_expr = r->check_expr;
1383 1327 : newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
1384 :
1385 1327 : result = lappend(result, newr);
1386 : }
1387 :
1388 1300 : MemoryContextSwitchTo(oldcxt);
1389 :
1390 1300 : return result;
1391 : }
1392 :
/*
 * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
 *
 * Caller must tell us the MemoryContext in which the DomainConstraintRef
 * lives.  The ref will be cleaned up when that context is reset/deleted.
 *
 * Caller must also tell us whether it wants check_exprstate fields to be
 * computed in the DomainConstraintState nodes attached to this ref.
 * If it doesn't, we need not make a copy of the DomainConstraintState list.
 */
void
InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref,
						MemoryContext refctx, bool need_exprstate)
{
	/* Look up the typcache entry --- we assume it survives indefinitely */
	ref->tcache = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
	ref->need_exprstate = need_exprstate;
	/* For safety, establish the callback before acquiring a refcount */
	ref->refctx = refctx;
	ref->dcc = NULL;
	ref->callback.func = dccref_deletion_callback;
	ref->callback.arg = ref;
	MemoryContextRegisterResetCallback(refctx, &ref->callback);
	/* Acquire refcount if there are constraints, and set up exported list */
	if (ref->tcache->domainData)
	{
		ref->dcc = ref->tcache->domainData;
		ref->dcc->dccRefCount++;
		if (ref->need_exprstate)
			/* build executable copies of the constraints in refctx */
			ref->constraints = prep_domain_constraints(ref->dcc->constraints,
													   ref->refctx);
		else
			/* no exprstates wanted, so just expose the cached list */
			ref->constraints = ref->tcache->domainData->constraints;
	}
	else
		ref->constraints = NIL;
}
1430 :
/*
 * UpdateDomainConstraintRef --- recheck validity of domain constraint info
 *
 * If the domain's constraint set changed, ref->constraints is updated to
 * point at a new list of cached constraints.
 *
 * In the normal case where nothing happened to the domain, this is cheap
 * enough that it's reasonable (and expected) to check before *each* use
 * of the constraint info.
 */
void
UpdateDomainConstraintRef(DomainConstraintRef *ref)
{
	TypeCacheEntry *typentry = ref->tcache;

	/* Make sure typcache entry's data is up to date */
	if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
		typentry->typtype == TYPTYPE_DOMAIN)
		load_domaintype_info(typentry);

	/* Transfer to ref object if there's new info, adjusting refcounts */
	if (ref->dcc != typentry->domainData)
	{
		/* Paranoia --- be sure link is nulled before trying to release */
		DomainConstraintCache *dcc = ref->dcc;

		if (dcc)
		{
			/*
			 * Note: we just leak the previous list of executable domain
			 * constraints.  Alternatively, we could keep those in a child
			 * context of ref->refctx and free that context at this point.
			 * However, in practice this code path will be taken so seldom
			 * that the extra bookkeeping for a child context doesn't seem
			 * worthwhile; we'll just allow a leak for the lifespan of refctx.
			 */
			ref->constraints = NIL;
			ref->dcc = NULL;
			decr_dcc_refcount(dcc);
		}
		/* Now acquire a reference to the current constraint data, if any */
		dcc = typentry->domainData;
		if (dcc)
		{
			ref->dcc = dcc;
			dcc->dccRefCount++;
			if (ref->need_exprstate)
				ref->constraints = prep_domain_constraints(dcc->constraints,
														   ref->refctx);
			else
				ref->constraints = dcc->constraints;
		}
	}
}
1484 :
1485 : /*
1486 : * DomainHasConstraints --- utility routine to check if a domain has constraints
1487 : *
1488 : * This is defined to return false, not fail, if type is not a domain.
1489 : */
1490 : bool
1491 15355 : DomainHasConstraints(Oid type_id)
1492 : {
1493 : TypeCacheEntry *typentry;
1494 :
1495 : /*
1496 : * Note: a side effect is to cause the typcache's domain data to become
1497 : * valid. This is fine since we'll likely need it soon if there is any.
1498 : */
1499 15355 : typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
1500 :
1501 15355 : return (typentry->domainData != NULL);
1502 : }
1503 :
1504 :
1505 : /*
1506 : * array_element_has_equality and friends are helper routines to check
1507 : * whether we should believe that array_eq and related functions will work
1508 : * on the given array type or composite type.
1509 : *
1510 : * The logic above may call these repeatedly on the same type entry, so we
1511 : * make use of the typentry->flags field to cache the results once known.
1512 : * Also, we assume that we'll probably want all these facts about the type
1513 : * if we want any, so we cache them all using only one lookup of the
1514 : * component datatype(s).
1515 : */
1516 :
1517 : static bool
1518 1678 : array_element_has_equality(TypeCacheEntry *typentry)
1519 : {
1520 1678 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1521 1418 : cache_array_element_properties(typentry);
1522 1678 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1523 : }
1524 :
1525 : static bool
1526 2945 : array_element_has_compare(TypeCacheEntry *typentry)
1527 : {
1528 2945 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1529 284 : cache_array_element_properties(typentry);
1530 2945 : return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1531 : }
1532 :
1533 : static bool
1534 1217 : array_element_has_hashing(TypeCacheEntry *typentry)
1535 : {
1536 1217 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1537 0 : cache_array_element_properties(typentry);
1538 1217 : return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1539 : }
1540 :
1541 : static bool
1542 262 : array_element_has_extended_hashing(TypeCacheEntry *typentry)
1543 : {
1544 262 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1545 0 : cache_array_element_properties(typentry);
1546 262 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1547 : }
1548 :
1549 : static void
1550 1702 : cache_array_element_properties(TypeCacheEntry *typentry)
1551 : {
1552 1702 : Oid elem_type = get_base_element_type(typentry->type_id);
1553 :
1554 1702 : if (OidIsValid(elem_type))
1555 : {
1556 : TypeCacheEntry *elementry;
1557 :
1558 1566 : elementry = lookup_type_cache(elem_type,
1559 : TYPECACHE_EQ_OPR |
1560 : TYPECACHE_CMP_PROC |
1561 : TYPECACHE_HASH_PROC |
1562 : TYPECACHE_HASH_EXTENDED_PROC);
1563 1566 : if (OidIsValid(elementry->eq_opr))
1564 1438 : typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1565 1566 : if (OidIsValid(elementry->cmp_proc))
1566 1339 : typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1567 1566 : if (OidIsValid(elementry->hash_proc))
1568 1432 : typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1569 1566 : if (OidIsValid(elementry->hash_extended_proc))
1570 1432 : typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1571 : }
1572 1702 : typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1573 1702 : }
1574 :
1575 : /*
1576 : * Likewise, some helper functions for composite types.
1577 : */
1578 :
1579 : static bool
1580 262 : record_fields_have_equality(TypeCacheEntry *typentry)
1581 : {
1582 262 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1583 245 : cache_record_field_properties(typentry);
1584 262 : return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1585 : }
1586 :
1587 : static bool
1588 303 : record_fields_have_compare(TypeCacheEntry *typentry)
1589 : {
1590 303 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1591 33 : cache_record_field_properties(typentry);
1592 303 : return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1593 : }
1594 :
1595 : static bool
1596 251 : record_fields_have_hashing(TypeCacheEntry *typentry)
1597 : {
1598 251 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1599 3 : cache_record_field_properties(typentry);
1600 251 : return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
1601 : }
1602 :
1603 : static bool
1604 135 : record_fields_have_extended_hashing(TypeCacheEntry *typentry)
1605 : {
1606 135 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1607 0 : cache_record_field_properties(typentry);
1608 135 : return (typentry->flags & TCFLAGS_HAVE_FIELD_EXTENDED_HASHING) != 0;
1609 : }
1610 :
/*
 * Determine (and cache in typentry->flags) which operations are supported
 * by all of a composite type's fields.  Also handles type RECORD and
 * domains over composite types.
 */
static void
cache_record_field_properties(TypeCacheEntry *typentry)
{
	/*
	 * For type RECORD, we can't really tell what will work, since we don't
	 * have access here to the specific anonymous type.  Just assume that
	 * equality and comparison will (we may get a failure at runtime).  We
	 * could also claim that hashing works, but then if code that has the
	 * option between a comparison-based (sort-based) and a hash-based plan
	 * chooses hashing, stuff could fail that would otherwise work if it chose
	 * a comparison-based plan.  In practice more types support comparison
	 * than hashing.
	 */
	if (typentry->type_id == RECORDOID)
	{
		typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
							TCFLAGS_HAVE_FIELD_COMPARE);
	}
	else if (typentry->typtype == TYPTYPE_COMPOSITE)
	{
		TupleDesc	tupdesc;
		int			newflags;
		int			i;

		/* Fetch composite type's tupdesc if we don't have it already */
		if (typentry->tupDesc == NULL)
			load_typcache_tupdesc(typentry);
		tupdesc = typentry->tupDesc;

		/* Must bump the refcount while we do additional catalog lookups */
		IncrTupleDescRefCount(tupdesc);

		/* Have each property if all non-dropped fields have the property */
		newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
					TCFLAGS_HAVE_FIELD_COMPARE |
					TCFLAGS_HAVE_FIELD_HASHING |
					TCFLAGS_HAVE_FIELD_EXTENDED_HASHING);
		for (i = 0; i < tupdesc->natts; i++)
		{
			TypeCacheEntry *fieldentry;
			Form_pg_attribute attr = TupleDescAttr(tupdesc, i);

			/* Dropped columns don't count against any property */
			if (attr->attisdropped)
				continue;

			fieldentry = lookup_type_cache(attr->atttypid,
										   TYPECACHE_EQ_OPR |
										   TYPECACHE_CMP_PROC |
										   TYPECACHE_HASH_PROC |
										   TYPECACHE_HASH_EXTENDED_PROC);
			if (!OidIsValid(fieldentry->eq_opr))
				newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
			if (!OidIsValid(fieldentry->cmp_proc))
				newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
			if (!OidIsValid(fieldentry->hash_proc))
				newflags &= ~TCFLAGS_HAVE_FIELD_HASHING;
			if (!OidIsValid(fieldentry->hash_extended_proc))
				newflags &= ~TCFLAGS_HAVE_FIELD_EXTENDED_HASHING;

			/* We can drop out of the loop once we disprove all bits */
			if (newflags == 0)
				break;
		}
		typentry->flags |= newflags;

		DecrTupleDescRefCount(tupdesc);
	}
	else if (typentry->typtype == TYPTYPE_DOMAIN)
	{
		/* If it's domain over composite, copy base type's properties */
		TypeCacheEntry *baseentry;

		/* load up basetype info if we didn't already */
		if (typentry->domainBaseType == InvalidOid)
		{
			typentry->domainBaseTypmod = -1;
			typentry->domainBaseType =
				getBaseTypeAndTypmod(typentry->type_id,
									 &typentry->domainBaseTypmod);
		}
		baseentry = lookup_type_cache(typentry->domainBaseType,
									  TYPECACHE_EQ_OPR |
									  TYPECACHE_CMP_PROC |
									  TYPECACHE_HASH_PROC |
									  TYPECACHE_HASH_EXTENDED_PROC);
		if (baseentry->typtype == TYPTYPE_COMPOSITE)
		{
			typentry->flags |= TCFLAGS_DOMAIN_BASE_IS_COMPOSITE;
			typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
												   TCFLAGS_HAVE_FIELD_COMPARE |
												   TCFLAGS_HAVE_FIELD_HASHING |
												   TCFLAGS_HAVE_FIELD_EXTENDED_HASHING);
		}
	}
	/* Mark the field properties as determined, whatever they were */
	typentry->flags |= TCFLAGS_CHECKED_FIELD_PROPERTIES;
}
1707 :
1708 : /*
1709 : * Likewise, some helper functions for range and multirange types.
1710 : *
1711 : * We can borrow the flag bits for array element properties to use for range
1712 : * element properties, since those flag bits otherwise have no use in a
1713 : * range or multirange type's typcache entry.
1714 : */
1715 :
     : /* Does the range type's element type support (plain) hashing? */
1716 : static bool
1717 60 : range_element_has_hashing(TypeCacheEntry *typentry)
1718 : {
     : /* Compute element-property flags on first use; cached in typentry after. */
1719 60 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1720 60 : cache_range_element_properties(typentry);
1721 60 : return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1722 : }
1723 :
     : /* Does the range type's element type support extended (64-bit) hashing? */
1724 : static bool
1725 0 : range_element_has_extended_hashing(TypeCacheEntry *typentry)
1726 : {
     : /* Compute element-property flags on first use; cached in typentry after. */
1727 0 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1728 0 : cache_range_element_properties(typentry);
1729 0 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1730 : }
1731 :
     : /*
     :  * Look up the range's subtype and record whether it has hash and
     :  * extended-hash support procs, setting the TCFLAGS_HAVE_ELEM_* bits.
     :  * TCFLAGS_CHECKED_ELEM_PROPERTIES is set unconditionally so we never
     :  * repeat this work for the same typcache entry.
     :  */
1732 : static void
1733 60 : cache_range_element_properties(TypeCacheEntry *typentry)
1734 : {
1735 : /* load up subtype link if we didn't already */
1736 60 : if (typentry->rngelemtype == NULL &&
1737 44 : typentry->typtype == TYPTYPE_RANGE)
1738 44 : load_rangetype_info(typentry);
1739 :
1740 60 : if (typentry->rngelemtype != NULL)
1741 : {
1742 : TypeCacheEntry *elementry;
1743 :
1744 : /* might need to calculate subtype's hash function properties */
1745 60 : elementry = lookup_type_cache(typentry->rngelemtype->type_id,
1746 : TYPECACHE_HASH_PROC |
1747 : TYPECACHE_HASH_EXTENDED_PROC);
1748 60 : if (OidIsValid(elementry->hash_proc))
1749 57 : typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1750 60 : if (OidIsValid(elementry->hash_extended_proc))
1751 57 : typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1752 : }
1753 60 : typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1754 : }
1755 :
     : /* Does the multirange's range element type support (plain) hashing? */
1756 : static bool
1757 9 : multirange_element_has_hashing(TypeCacheEntry *typentry)
1758 : {
     : /* Compute element-property flags on first use; cached in typentry after. */
1759 9 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1760 9 : cache_multirange_element_properties(typentry);
1761 9 : return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1762 : }
1763 :
     : /* Does the multirange's range element type support extended hashing? */
1764 : static bool
1765 0 : multirange_element_has_extended_hashing(TypeCacheEntry *typentry)
1766 : {
     : /* Compute element-property flags on first use; cached in typentry after. */
1767 0 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1768 0 : cache_multirange_element_properties(typentry);
1769 0 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1770 : }
1771 :
     : /*
     :  * As cache_range_element_properties, but reached through one extra level
     :  * of indirection: multirange -> range -> element type.
     :  */
1772 : static void
1773 9 : cache_multirange_element_properties(TypeCacheEntry *typentry)
1774 : {
1775 : /* load up range link if we didn't already */
1776 9 : if (typentry->rngtype == NULL &&
1777 0 : typentry->typtype == TYPTYPE_MULTIRANGE)
1778 0 : load_multirangetype_info(typentry);
1779 :
1780 9 : if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
1781 : {
1782 : TypeCacheEntry *elementry;
1783 :
1784 : /* might need to calculate subtype's hash function properties */
1785 9 : elementry = lookup_type_cache(typentry->rngtype->rngelemtype->type_id,
1786 : TYPECACHE_HASH_PROC |
1787 : TYPECACHE_HASH_EXTENDED_PROC);
1788 9 : if (OidIsValid(elementry->hash_proc))
1789 6 : typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1790 9 : if (OidIsValid(elementry->hash_extended_proc))
1791 6 : typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1792 : }
1793 9 : typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1794 : }
1795 :
1796 : /*
1797 : * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
1798 : * to store 'typmod'.
1799 : */
1800 : static void
1801 8868 : ensure_record_cache_typmod_slot_exists(int32 typmod)
1802 : {
     : /* First use: allocate a fixed-size, zeroed 64-slot array in CacheMemoryContext. */
1803 8868 : if (RecordCacheArray == NULL)
1804 : {
1805 3546 : RecordCacheArray = (RecordCacheArrayEntry *)
1806 3546 : MemoryContextAllocZero(CacheMemoryContext,
1807 : 64 * sizeof(RecordCacheArrayEntry));
1808 3546 : RecordCacheArrayLen = 64;
1809 : }
1810 :
1811 8868 : if (typmod >= RecordCacheArrayLen)
1812 : {
     : /* Grow to the next power of two; repalloc0_array zero-fills the new slots. */
1813 0 : int32 newlen = pg_nextpower2_32(typmod + 1);
1814 :
1815 0 : RecordCacheArray = repalloc0_array(RecordCacheArray,
1816 : RecordCacheArrayEntry,
1817 : RecordCacheArrayLen,
1818 : newlen);
1819 0 : RecordCacheArrayLen = newlen;
1820 : }
1821 8868 : }
1822 :
1823 : /*
1824 : * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1825 : *
1826 : * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1827 : * hasn't had its refcount bumped.
1828 : */
1829 : static TupleDesc
1830 76477 : lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
1831 : {
1832 76477 : if (type_id != RECORDOID)
1833 : {
1834 : /*
1835 : * It's a named composite type, so use the regular typcache.
1836 : */
1837 : TypeCacheEntry *typentry;
1838 :
1839 33096 : typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1840 33095 : if (typentry->tupDesc == NULL && !noError)
1841 0 : ereport(ERROR,
1842 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1843 : errmsg("type %s is not composite",
1844 : format_type_be(type_id))));
1845 33095 : return typentry->tupDesc;
1846 : }
1847 : else
1848 : {
1849 : /*
1850 : * It's a transient record type, so look in our record-type table.
1851 : */
1852 43381 : if (typmod >= 0)
1853 : {
1854 : /* It is already in our local cache? */
1855 43373 : if (typmod < RecordCacheArrayLen &&
1856 43370 : RecordCacheArray[typmod].tupdesc != NULL)
1857 43358 : return RecordCacheArray[typmod].tupdesc;
1858 :
1859 : /* Are we attached to a shared record typmod registry? */
1860 15 : if (CurrentSession->shared_typmod_registry != NULL)
1861 : {
1862 : SharedTypmodTableEntry *entry;
1863 :
1864 : /* Try to find it in the shared typmod index. */
1865 15 : entry = dshash_find(CurrentSession->shared_typmod_table,
1866 : &typmod, false);
1867 15 : if (entry != NULL)
1868 : {
1869 : TupleDesc tupdesc;
1870 :
1871 : tupdesc = (TupleDesc)
1872 15 : dsa_get_address(CurrentSession->area,
1873 : entry->shared_tupdesc);
1874 : Assert(typmod == tupdesc->tdtypmod);
1875 :
1876 : /* We may need to extend the local RecordCacheArray. */
1877 15 : ensure_record_cache_typmod_slot_exists(typmod);
1878 :
1879 : /*
1880 : * Our local array can now point directly to the TupleDesc
1881 : * in shared memory, which is non-reference-counted.
1882 : */
1883 15 : RecordCacheArray[typmod].tupdesc = tupdesc;
1884 : Assert(tupdesc->tdrefcount == -1);
1885 :
1886 : /*
1887 : * We don't share tupdesc identifiers across processes, so
1888 : * assign one locally.
1889 : */
1890 15 : RecordCacheArray[typmod].id = ++tupledesc_id_counter;
1891 :
1892 15 : dshash_release_lock(CurrentSession->shared_typmod_table,
1893 : entry);
1894 :
1895 15 : return RecordCacheArray[typmod].tupdesc;
1896 : }
1897 : }
1898 : }
1899 :
     : /* Negative or unknown typmod: complain unless the caller opted out. */
1900 8 : if (!noError)
1901 0 : ereport(ERROR,
1902 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1903 : errmsg("record type has not been registered")));
1904 8 : return NULL;
1905 : }
1906 : }
1907 :
1908 : /*
1909 : * lookup_rowtype_tupdesc
1910 : *
1911 : * Given a typeid/typmod that should describe a known composite type,
1912 : * return the tuple descriptor for the type. Will ereport on failure.
1913 : * (Use ereport because this is reachable with user-specified OIDs,
1914 : * for example from record_in().)
1915 : *
1916 : * Note: on success, we increment the refcount of the returned TupleDesc,
1917 : * and log the reference in CurrentResourceOwner. Caller must call
1918 : * ReleaseTupleDesc when done using the tupdesc. (There are some
1919 : * cases in which the returned tupdesc is not refcounted, in which
1920 : * case PinTupleDesc/ReleaseTupleDesc are no-ops; but in these cases
1921 : * the tupdesc is guaranteed to live till process exit.)
1922 : */
1923 : TupleDesc
1924 38637 : lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
1925 : {
1926 : TupleDesc tupDesc;
1927 :
     : /* With noError = false the internal lookup ereports rather than
     :  * returning NULL, so tupDesc is always valid here. */
1928 38637 : tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1929 38636 : PinTupleDesc(tupDesc);
1930 38636 : return tupDesc;
1931 : }
1932 :
1933 : /*
1934 : * lookup_rowtype_tupdesc_noerror
1935 : *
1936 : * As above, but if the type is not a known composite type and noError
1937 : * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1938 : * type_id is passed, you'll get an ereport anyway.)
1939 : */
1940 : TupleDesc
1941 10 : lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1942 : {
1943 : TupleDesc tupDesc;
1944 :
1945 10 : tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
     : /* NULL is possible only when noError is true; pin only real tupdescs. */
1946 10 : if (tupDesc != NULL)
1947 10 : PinTupleDesc(tupDesc);
1948 10 : return tupDesc;
1949 : }
1950 :
1951 : /*
1952 : * lookup_rowtype_tupdesc_copy
1953 : *
1954 : * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1955 : * copied into the CurrentMemoryContext and is not reference-counted.
1956 : */
1957 : TupleDesc
1958 37821 : lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
1959 : {
1960 : TupleDesc tmp;
1961 :
1962 37821 : tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
     : /* Copy (with constraints) into CurrentMemoryContext; no refcount to
     :  * manage, since we never pinned the cached tupdesc. */
1963 37821 : return CreateTupleDescCopyConstr(tmp);
1964 : }
1965 :
1966 : /*
1967 : * lookup_rowtype_tupdesc_domain
1968 : *
1969 : * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
1970 : * a domain over a named composite type; so this is effectively equivalent to
1971 : * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
1972 : * except for being a tad faster.
1973 : *
1974 : * Note: the reason we don't fold the look-through-domain behavior into plain
1975 : * lookup_rowtype_tupdesc() is that we want callers to know they might be
1976 : * dealing with a domain. Otherwise they might construct a tuple that should
1977 : * be of the domain type, but not apply domain constraints.
1978 : */
1979 : TupleDesc
1980 1644 : lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
1981 : {
1982 : TupleDesc tupDesc;
1983 :
1984 1644 : if (type_id != RECORDOID)
1985 : {
1986 : /*
1987 : * Check for domain or named composite type. We might as well load
1988 : * whichever data is needed.
1989 : */
1990 : TypeCacheEntry *typentry;
1991 :
1992 1635 : typentry = lookup_type_cache(type_id,
1993 : TYPECACHE_TUPDESC |
1994 : TYPECACHE_DOMAIN_BASE_INFO);
     : /* Domain case: recurse (once) on the base type; domains over
     :  * domains have already been flattened by DOMAIN_BASE_INFO. */
1995 1635 : if (typentry->typtype == TYPTYPE_DOMAIN)
1996 10 : return lookup_rowtype_tupdesc_noerror(typentry->domainBaseType,
1997 : typentry->domainBaseTypmod,
1998 : noError);
1999 1625 : if (typentry->tupDesc == NULL && !noError)
2000 0 : ereport(ERROR,
2001 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2002 : errmsg("type %s is not composite",
2003 : format_type_be(type_id))));
2004 1625 : tupDesc = typentry->tupDesc;
2005 : }
2006 : else
2007 9 : tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
2008 1634 : if (tupDesc != NULL)
2009 1626 : PinTupleDesc(tupDesc);
2010 1634 : return tupDesc;
2011 : }
2012 :
2013 : /*
2014 : * Hash function for the hash table of RecordCacheEntry.
2015 : */
2016 : static uint32
2017 230521 : record_type_typmod_hash(const void *data, size_t size)
2018 : {
2019 230521 : const RecordCacheEntry *entry = data;
2020 :
     : /* Hash the row type's structure, not the TupleDesc pointer, so that
     :  * structurally-equal tuple descriptors collide as intended. */
2021 230521 : return hashRowType(entry->tupdesc);
2022 : }
2023 :
2024 : /*
2025 : * Match function for the hash table of RecordCacheEntry.
2026 : */
2027 : static int
2028 217140 : record_type_typmod_compare(const void *a, const void *b, size_t size)
2029 : {
2030 217140 : const RecordCacheEntry *left = a;
2031 217140 : const RecordCacheEntry *right = b;
2032 :
     : /* dynahash match convention: 0 means equal, nonzero means not equal. */
2033 217140 : return equalRowTypes(left->tupdesc, right->tupdesc) ? 0 : 1;
2034 : }
2035 :
2036 : /*
2037 : * assign_record_type_typmod
2038 : *
2039 : * Given a tuple descriptor for a RECORD type, find or create a cache entry
2040 : * for the type, and set the tupdesc's tdtypmod field to a value that will
2041 : * identify this cache entry to lookup_rowtype_tupdesc.
2042 : */
2043 : void
2044 221668 : assign_record_type_typmod(TupleDesc tupDesc)
2045 : {
2046 : RecordCacheEntry *recentry;
2047 : TupleDesc entDesc;
2048 : bool found;
2049 : MemoryContext oldcxt;
2050 :
2051 : Assert(tupDesc->tdtypeid == RECORDOID);
2052 :
2053 221668 : if (RecordCacheHash == NULL)
2054 : {
2055 : /* First time through: initialize the hash table */
2056 : HASHCTL ctl;
2057 :
2058 3546 : ctl.keysize = sizeof(TupleDesc); /* just the pointer */
2059 3546 : ctl.entrysize = sizeof(RecordCacheEntry);
2060 3546 : ctl.hash = record_type_typmod_hash;
2061 3546 : ctl.match = record_type_typmod_compare;
2062 3546 : RecordCacheHash = hash_create("Record information cache", 64,
2063 : &ctl,
2064 : HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);
2065 :
2066 : /* Also make sure CacheMemoryContext exists */
2067 3546 : if (!CacheMemoryContext)
2068 0 : CreateCacheMemoryContext();
2069 : }
2070 :
2071 : /*
2072 : * Find a hashtable entry for this tuple descriptor. We don't use
2073 : * HASH_ENTER yet, because if it's missing, we need to make sure that all
2074 : * the allocations succeed before we create the new entry.
2075 : */
2076 221668 : recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
2077 : &tupDesc,
2078 : HASH_FIND, &found);
     : /* Fast path: an equivalent row type was assigned a typmod before;
     :  * just copy it into the caller's tupdesc. */
2079 221668 : if (found && recentry->tupdesc != NULL)
2080 : {
2081 212815 : tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
2082 212815 : return;
2083 : }
2084 :
2085 : /* Not present, so need to manufacture an entry */
2086 8853 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2087 :
2088 : /* Look in the SharedRecordTypmodRegistry, if attached */
2089 8853 : entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
2090 8853 : if (entDesc == NULL)
2091 : {
2092 : /*
2093 : * Make sure we have room before we CreateTupleDescCopy() or advance
2094 : * NextRecordTypmod.
2095 : */
2096 8795 : ensure_record_cache_typmod_slot_exists(NextRecordTypmod);
2097 :
2098 : /* Reference-counted local cache only. */
2099 8795 : entDesc = CreateTupleDescCopy(tupDesc);
2100 8795 : entDesc->tdrefcount = 1;
2101 8795 : entDesc->tdtypmod = NextRecordTypmod++;
2102 : }
2103 : else
2104 : {
     : /* Shared registry supplied the tupdesc (and its typmod); just make
     :  * sure our local array can index it. */
2105 58 : ensure_record_cache_typmod_slot_exists(entDesc->tdtypmod);
2106 : }
2107 :
2108 8853 : RecordCacheArray[entDesc->tdtypmod].tupdesc = entDesc;
2109 :
2110 : /* Assign a unique tupdesc identifier, too. */
2111 8853 : RecordCacheArray[entDesc->tdtypmod].id = ++tupledesc_id_counter;
2112 :
2113 : /* Fully initialized; create the hash table entry */
2114 8853 : recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
2115 : &tupDesc,
2116 : HASH_ENTER, NULL);
2117 8853 : recentry->tupdesc = entDesc;
2118 :
2119 : /* Update the caller's tuple descriptor. */
2120 8853 : tupDesc->tdtypmod = entDesc->tdtypmod;
2121 :
2122 8853 : MemoryContextSwitchTo(oldcxt);
2123 : }
2124 :
2125 : /*
2126 : * assign_record_type_identifier
2127 : *
2128 : * Get an identifier, which will be unique over the lifespan of this backend
2129 : * process, for the current tuple descriptor of the specified composite type.
2130 : * For named composite types, the value is guaranteed to change if the type's
2131 : * definition does. For registered RECORD types, the value will not change
2132 : * once assigned, since the registered type won't either. If an anonymous
2133 : * RECORD type is specified, we return a new identifier on each call.
2134 : */
2135 : uint64
2136 2816 : assign_record_type_identifier(Oid type_id, int32 typmod)
2137 : {
2138 2816 : if (type_id != RECORDOID)
2139 : {
2140 : /*
2141 : * It's a named composite type, so use the regular typcache.
2142 : */
2143 : TypeCacheEntry *typentry;
2144 :
2145 0 : typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC)
2146 0 : if (typentry->tupDesc == NULL)
2147 0 : ereport(ERROR,
2148 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2149 : errmsg("type %s is not composite",
2150 : format_type_be(type_id))));
2151 : Assert(typentry->tupDesc_identifier != 0);
2152 0 : return typentry->tupDesc_identifier;
2153 : }
2154 : else
2155 : {
2156 : /*
2157 : * It's a transient record type, so look in our record-type table.
2158 : */
     : /* A registered RECORD keeps the identifier assigned when it was
     :  * cached; return that stable value. */
2159 2816 : if (typmod >= 0 && typmod < RecordCacheArrayLen &&
2160 30 : RecordCacheArray[typmod].tupdesc != NULL)
2161 : {
2162 : Assert(RecordCacheArray[typmod].id != 0);
2163 30 : return RecordCacheArray[typmod].id;
2164 : }
2165 :
2166 : /* For anonymous or unrecognized record type, generate a new ID */
2167 2786 : return ++tupledesc_id_counter;
2168 : }
2169 : }
2170 :
2171 : /*
2172 : * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
2173 : * This exists only to avoid exposing private innards of
2174 : * SharedRecordTypmodRegistry in a header.
2175 : */
2176 : size_t
2177 82 : SharedRecordTypmodRegistryEstimate(void)
2178 : {
     : /* The registry itself is a fixed-size struct; all variable-size data
     :  * lives in the DSA area, not in this shmem region. */
2179 82 : return sizeof(SharedRecordTypmodRegistry);
2180 : }
2181 :
2182 : /*
2183 : * Initialize 'registry' in a pre-existing shared memory region, which must be
2184 : * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
2185 : * bytes.
2186 : *
2187 : * 'area' will be used to allocate shared memory space as required for the
2188 : * typemod registration. The current process, expected to be a leader process
2189 : * in a parallel query, will be attached automatically and its current record
2190 : * types will be loaded into *registry. While attached, all calls to
2191 : * assign_record_type_typmod will use the shared registry. Worker backends
2192 : * will need to attach explicitly.
2193 : *
2194 : * Note that this function takes 'area' and 'segment' as arguments rather than
2195 : * accessing them via CurrentSession, because they aren't installed there
2196 : * until after this function runs.
2197 : */
2198 : void
2199 82 : SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry,
2200 : dsm_segment *segment,
2201 : dsa_area *area)
2202 : {
2203 : MemoryContext old_context;
2204 : dshash_table *record_table;
2205 : dshash_table *typmod_table;
2206 : int32 typmod;
2207 :
2208 : Assert(!IsParallelWorker());
2209 :
2210 : /* We can't already be attached to a shared registry. */
2211 : Assert(CurrentSession->shared_typmod_registry == NULL);
2212 : Assert(CurrentSession->shared_record_table == NULL);
2213 : Assert(CurrentSession->shared_typmod_table == NULL);
2214 :
2215 82 : old_context = MemoryContextSwitchTo(TopMemoryContext);
2216 :
2217 : /* Create the hash table of tuple descriptors indexed by themselves. */
2218 82 : record_table = dshash_create(area, &srtr_record_table_params, area);
2219 :
2220 : /* Create the hash table of tuple descriptors indexed by typmod. */
2221 82 : typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);
2222 :
2223 82 : MemoryContextSwitchTo(old_context);
2224 :
2225 : /* Initialize the SharedRecordTypmodRegistry. */
2226 82 : registry->record_table_handle = dshash_get_hash_table_handle(record_table);
2227 82 : registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
2228 82 : pg_atomic_init_u32(&registry->next_typmod, NextRecordTypmod);
2229 :
2230 : /*
2231 : * Copy all entries from this backend's private registry into the shared
2232 : * registry.
2233 : */
2234 205 : for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
2235 : {
2236 : SharedTypmodTableEntry *typmod_table_entry;
2237 : SharedRecordTableEntry *record_table_entry;
2238 : SharedRecordTableKey record_table_key;
2239 : dsa_pointer shared_dp;
2240 : TupleDesc tupdesc;
2241 : bool found;
2242 :
2243 123 : tupdesc = RecordCacheArray[typmod].tupdesc;
2244 123 : if (tupdesc == NULL)
2245 0 : continue;
2246 :
2247 : /* Copy the TupleDesc into shared memory. */
2248 123 : shared_dp = share_tupledesc(area, tupdesc, typmod);
2249 :
2250 : /* Insert into the typmod table. */
2251 123 : typmod_table_entry = dshash_find_or_insert(typmod_table,
2252 123 : &tupdesc->tdtypmod,
2253 : &found);
     : /* Typmods are unique in the local cache, so a duplicate here would
     :  * indicate corruption of the shared table. */
2254 123 : if (found)
2255 0 : elog(ERROR, "cannot create duplicate shared record typmod");
2256 123 : typmod_table_entry->typmod = tupdesc->tdtypmod;
2257 123 : typmod_table_entry->shared_tupdesc = shared_dp;
2258 123 : dshash_release_lock(typmod_table, typmod_table_entry);
2259 :
2260 : /* Insert into the record table. */
2261 123 : record_table_key.shared = false;
2262 123 : record_table_key.u.local_tupdesc = tupdesc;
2263 123 : record_table_entry = dshash_find_or_insert(record_table,
2264 : &record_table_key,
2265 : &found);
     : /* Only fill in the entry if it was newly inserted. */
2266 123 : if (!found)
2267 : {
2268 123 : record_table_entry->key.shared = true;
2269 123 : record_table_entry->key.u.shared_tupdesc = shared_dp;
2270 : }
2271 123 : dshash_release_lock(record_table, record_table_entry);
2272 : }
2273 :
2274 : /*
2275 : * Set up the global state that will tell assign_record_type_typmod and
2276 : * lookup_rowtype_tupdesc_internal about the shared registry.
2277 : */
2278 82 : CurrentSession->shared_record_table = record_table;
2279 82 : CurrentSession->shared_typmod_table = typmod_table;
2280 82 : CurrentSession->shared_typmod_registry = registry;
2281 :
2282 : /*
2283 : * We install a detach hook in the leader, but only to handle cleanup on
2284 : * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
2285 : * the memory, the leader process will use a shared registry until it
2286 : * exits.
2287 : */
2288 82 : on_dsm_detach(segment, shared_record_typmod_registry_detach, (Datum) 0);
2289 82 : }
2290 :
2291 : /*
2292 : * Attach to 'registry', which must have been initialized already by another
2293 : * backend. Future calls to assign_record_type_typmod and
2294 : * lookup_rowtype_tupdesc_internal will use the shared registry until the
2295 : * current session is detached.
2296 : */
2297 : void
2298 1493 : SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
2299 : {
2300 : MemoryContext old_context;
2301 : dshash_table *record_table;
2302 : dshash_table *typmod_table;
2303 :
2304 : Assert(IsParallelWorker());
2305 :
2306 : /* We can't already be attached to a shared registry. */
2307 : Assert(CurrentSession != NULL);
2308 : Assert(CurrentSession->segment != NULL);
2309 : Assert(CurrentSession->area != NULL);
2310 : Assert(CurrentSession->shared_typmod_registry == NULL);
2311 : Assert(CurrentSession->shared_record_table == NULL);
2312 : Assert(CurrentSession->shared_typmod_table == NULL);
2313 :
2314 : /*
2315 : * We can't already have typmods in our local cache, because they'd clash
2316 : * with those imported by SharedRecordTypmodRegistryInit. This should be
2317 : * a freshly started parallel worker. If we ever support worker
2318 : * recycling, a worker would need to zap its local cache in between
2319 : * servicing different queries, in order to be able to call this and
2320 : * synchronize typmods with a new leader; but that's problematic because
2321 : * we can't be very sure that record-typmod-related state hasn't escaped
2322 : * to anywhere else in the process.
2323 : */
2324 : Assert(NextRecordTypmod == 0);
2325 :
     : /* dshash handles must live for the whole backend, not a query context. */
2326 1493 : old_context = MemoryContextSwitchTo(TopMemoryContext);
2327 :
2328 : /* Attach to the two hash tables. */
2329 1493 : record_table = dshash_attach(CurrentSession->area,
2330 : &srtr_record_table_params,
2331 : registry->record_table_handle,
2332 1493 : CurrentSession->area);
2333 1493 : typmod_table = dshash_attach(CurrentSession->area,
2334 : &srtr_typmod_table_params,
2335 : registry->typmod_table_handle,
2336 : NULL);
2337 :
2338 1493 : MemoryContextSwitchTo(old_context);
2339 :
2340 : /*
2341 : * Set up detach hook to run at worker exit. Currently this is the same
2342 : * as the leader's detach hook, but in future they might need to be
2343 : * different.
2344 : */
2345 1493 : on_dsm_detach(CurrentSession->segment,
2346 : shared_record_typmod_registry_detach,
2347 : PointerGetDatum(registry));
2348 :
2349 : /*
2350 : * Set up the session state that will tell assign_record_type_typmod and
2351 : * lookup_rowtype_tupdesc_internal about the shared registry.
2352 : */
2353 1493 : CurrentSession->shared_typmod_registry = registry;
2354 1493 : CurrentSession->shared_record_table = record_table;
2355 1493 : CurrentSession->shared_typmod_table = typmod_table;
2356 1493 : }
2357 :
2358 : /*
2359 : * InvalidateCompositeTypeCacheEntry
2360 : * Invalidate particular TypeCacheEntry on Relcache inval callback
2361 : *
2362 : * Delete the cached tuple descriptor (if any) for the given composite
2363 : * type, and reset whatever info we have cached about the composite type's
2364 : * comparability.
2365 : */
2366 : static void
2367 5845 : InvalidateCompositeTypeCacheEntry(TypeCacheEntry *typentry)
2368 : {
2369 : bool hadTupDescOrOpclass;
2370 :
2371 : Assert(typentry->typtype == TYPTYPE_COMPOSITE &&
2372 : OidIsValid(typentry->typrelid));
2373 :
     : /* Remember whether there is anything that actually gets cleared below,
     :  * so we know whether delete_rel_type_cache_if_needed() must run. */
2374 9953 : hadTupDescOrOpclass = (typentry->tupDesc != NULL) ||
2375 4108 : (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2376 :
2377 : /* Delete tupdesc if we have it */
2378 5845 : if (typentry->tupDesc != NULL)
2379 : {
2380 : /*
2381 : * Release our refcount and free the tupdesc if none remain. We can't
2382 : * use DecrTupleDescRefCount here because this reference is not logged
2383 : * by the current resource owner.
2384 : */
2385 : Assert(typentry->tupDesc->tdrefcount > 0);
2386 1737 : if (--typentry->tupDesc->tdrefcount == 0)
2387 1409 : FreeTupleDesc(typentry->tupDesc);
2388 1737 : typentry->tupDesc = NULL;
2389 :
2390 : /*
2391 : * Also clear tupDesc_identifier, so that anyone watching it will
2392 : * realize that the tupdesc has changed.
2393 : */
2394 1737 : typentry->tupDesc_identifier = 0;
2395 : }
2396 :
2397 : /* Reset equality/comparison/hashing validity information */
2398 5845 : typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2399 :
2400 : /*
2401 : * Call delete_rel_type_cache_if_needed() if we actually cleared
2402 : * something.
2403 : */
2404 5845 : if (hadTupDescOrOpclass)
2405 1737 : delete_rel_type_cache_if_needed(typentry);
2406 5845 : }
2407 :
2408 : /*
2409 : * TypeCacheRelCallback
2410 : * Relcache inval callback function
2411 : *
2412 : * Delete the cached tuple descriptor (if any) for the given rel's composite
2413 : * type, or for all composite types if relid == InvalidOid. Also reset
2414 : * whatever info we have cached about the composite type's comparability.
2415 : *
2416 : * This is called when a relcache invalidation event occurs for the given
2417 : * relid. We can't use syscache to find a type corresponding to the given
2418 : * relation because the code can be called outside of transaction. Thus, we
2419 : * use the RelIdToTypeIdCacheHash map to locate appropriate typcache entry.
2420 : */
2421 : static void
2422 1218203 : TypeCacheRelCallback(Datum arg, Oid relid)
2423 : {
2424 : TypeCacheEntry *typentry;
2425 :
2426 : /*
2427 : * RelIdToTypeIdCacheHash and TypeCacheHash should exist, otherwise this
2428 : * callback wouldn't be registered
2429 : */
2430 1218203 : if (OidIsValid(relid))
2431 : {
2432 : RelIdToTypeIdCacheEntry *relentry;
2433 :
2434 : /*
2435 : * Find a RelIdToTypeIdCacheHash entry, which should exist as soon as
2436 : * corresponding typcache entry has something to clean.
2437 : */
2438 1217823 : relentry = (RelIdToTypeIdCacheEntry *) hash_search(RelIdToTypeIdCacheHash,
2439 : &relid,
2440 : HASH_FIND, NULL);
2441 :
2442 1217823 : if (relentry != NULL)
2443 : {
2444 5780 : typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
2445 5780 : &relentry->composite_typid,
2446 : HASH_FIND, NULL);
2447 :
     : /* Invalidate only if a typcache entry actually exists. */
2448 5780 : if (typentry != NULL)
2449 : {
2450 : Assert(typentry->typtype == TYPTYPE_COMPOSITE);
2451 : Assert(relid == typentry->typrelid);
2452 :
2453 5780 : InvalidateCompositeTypeCacheEntry(typentry);
2454 : }
2455 : }
2456 :
2457 : /*
2458 : * Visit all the domain types sequentially. Typically, this shouldn't
2459 : * affect performance since domain types are less tended to bloat.
2460 : * Domain types are created manually, unlike composite types which are
2461 : * automatically created for every temporary table.
2462 : */
2463 1217823 : for (typentry = firstDomainTypeEntry;
2464 2073289 : typentry != NULL;
2465 855466 : typentry = typentry->nextDomain)
2466 : {
2467 : /*
2468 : * If it's domain over composite, reset flags. (We don't bother
2469 : * trying to determine whether the specific base type needs a
2470 : * reset.) Note that if we haven't determined whether the base
2471 : * type is composite, we don't need to reset anything.
2472 : */
2473 855466 : if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
2474 0 : typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2475 : }
2476 : }
2477 : else
2478 : {
2479 : HASH_SEQ_STATUS status;
2480 :
2481 : /*
2482 : * Relid is invalid. By convention, we need to reset all composite
2483 : * types in cache. Also, we should reset flags for domain types, and
2484 : * we loop over all entries in hash, so, do it in a single scan.
2485 : */
2486 380 : hash_seq_init(&status, TypeCacheHash);
2487 1960 : while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2488 : {
2489 1580 : if (typentry->typtype == TYPTYPE_COMPOSITE)
2490 : {
2491 65 : InvalidateCompositeTypeCacheEntry(typentry);
2492 : }
2493 1515 : else if (typentry->typtype == TYPTYPE_DOMAIN)
2494 : {
2495 : /*
2496 : * If it's domain over composite, reset flags. (We don't
2497 : * bother trying to determine whether the specific base type
2498 : * needs a reset.) Note that if we haven't determined whether
2499 : * the base type is composite, we don't need to reset
2500 : * anything.
2501 : */
2502 18 : if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
2503 0 : typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2504 : }
2505 : }
2506 : }
2507 1218203 : }
2508 :
2509 : /*
2510 : * TypeCacheTypCallback
2511 : * Syscache inval callback function
2512 : *
2513 : * This is called when a syscache invalidation event occurs for any
2514 : * pg_type row. If we have information cached about that type, mark
2515 : * it as needing to be reloaded.
2516 : */
2517 : static void
2518 402568 : TypeCacheTypCallback(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
2519 : {
2520 : HASH_SEQ_STATUS status;
2521 : TypeCacheEntry *typentry;
2522 :
2523 : /* TypeCacheHash must exist, else this callback wouldn't be registered */
2524 :
2525 : /*
2526 : * By convention, zero hash value is passed to the callback as a sign that
2527 : * it's time to invalidate the whole cache. See sinval.c, inval.c and
2528 : * InvalidateSystemCachesExtended().
2529 : */
2530 402568 : if (hashvalue == 0)
2531 256 : hash_seq_init(&status, TypeCacheHash);
2532 : else
     : /* Targeted case: visit only entries whose key matches hashvalue. */
2533 402312 : hash_seq_init_with_hash_value(&status, TypeCacheHash, hashvalue);
2534 :
2535 808828 : while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2536 : {
2537 3692 : bool hadPgTypeData = (typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA);
2538 :
2539 : Assert(hashvalue == 0 || typentry->type_id_hash == hashvalue);
2540 :
2541 : /*
2542 : * Mark the data obtained directly from pg_type as invalid. Also, if
2543 : * it's a domain, typnotnull might've changed, so we'll need to
2544 : * recalculate its constraints.
2545 : */
2546 3692 : typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
2547 : TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS);
2548 :
2549 : /*
2550 : * Call delete_rel_type_cache_if_needed() if we cleaned
2551 : * TCFLAGS_HAVE_PG_TYPE_DATA flag previously.
2552 : */
2553 3692 : if (hadPgTypeData)
2554 1951 : delete_rel_type_cache_if_needed(typentry);
2555 : }
2556 402568 : }
2557 :
2558 : /*
2559 : * TypeCacheOpcCallback
2560 : * Syscache inval callback function
2561 : *
2562 : * This is called when a syscache invalidation event occurs for any pg_opclass
2563 : * row. In principle we could probably just invalidate data dependent on the
2564 : * particular opclass, but since updates on pg_opclass are rare in production
2565 : * it doesn't seem worth a lot of complication: we just mark all cached data
2566 : * invalid.
2567 : *
2568 : * Note that we don't bother watching for updates on pg_amop or pg_amproc.
2569 : * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
2570 : * is not allowed to be used to add/drop the primary operators and functions
2571 : * of an opclass, only cross-type members of a family; and the latter sorts
2572 : * of members are not going to get cached here.
2573 : */
2574 : static void
2575 1380 : TypeCacheOpcCallback(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
2576 : {
2577 : HASH_SEQ_STATUS status;
2578 : TypeCacheEntry *typentry;
2579 :
2580 : /* TypeCacheHash must exist, else this callback wouldn't be registered */
2581 1380 : hash_seq_init(&status, TypeCacheHash);
2582 8827 : while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2583 : {
2584 6067 : bool hadOpclass = (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2585 :
2586 : /* Reset equality/comparison/hashing validity information */
2587 6067 : typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2588 :
2589 : /*
2590 : * Call delete_rel_type_cache_if_needed() if we actually cleared some
2591 : * of TCFLAGS_OPERATOR_FLAGS.
2592 : */
2593 6067 : if (hadOpclass)
2594 1130 : delete_rel_type_cache_if_needed(typentry);
2595 : }
2596 1380 : }
2597 :
2598 : /*
2599 : * TypeCacheConstrCallback
2600 : * Syscache inval callback function
2601 : *
2602 : * This is called when a syscache invalidation event occurs for any
2603 : * pg_constraint row. We flush information about domain constraints
2604 : * when this happens.
2605 : *
2606 : * It's slightly annoying that we can't tell whether the inval event was for
2607 : * a domain constraint record or not; there's usually more update traffic
2608 : * for table constraints than domain constraints, so we'll do a lot of
2609 : * useless flushes. Still, this is better than the old no-caching-at-all
2610 : * approach to domain constraints.
2611 : */
2612 : static void
2613 118306 : TypeCacheConstrCallback(Datum arg, SysCacheIdentifier cacheid, uint32 hashvalue)
2614 : {
2615 : TypeCacheEntry *typentry;
2616 :
2617 : /*
2618 : * Because this is called very frequently, and typically very few of the
2619 : * typcache entries are for domains, we don't use hash_seq_search here.
2620 : * Instead we thread all the domain-type entries together so that we can
2621 : * visit them cheaply.
2622 : */
2623 118306 : for (typentry = firstDomainTypeEntry;
2624 217168 : typentry != NULL;
2625 98862 : typentry = typentry->nextDomain)
2626 : {
2627 : /* Reset domain constraint validity information */
2628 98862 : typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
2629 : }
2630 118306 : }
2631 :
2632 :
2633 : /*
2634 : * Check if given OID is part of the subset that's sortable by comparisons
2635 : */
2636 : static inline bool
2637 151929 : enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
2638 : {
2639 : Oid offset;
2640 :
2641 151929 : if (arg < enumdata->bitmap_base)
2642 0 : return false;
2643 151929 : offset = arg - enumdata->bitmap_base;
2644 151929 : if (offset > (Oid) INT_MAX)
2645 0 : return false;
2646 151929 : return bms_is_member((int) offset, enumdata->sorted_values);
2647 : }
2648 :
2649 :
2650 : /*
2651 : * compare_values_of_enum
2652 : * Compare two members of an enum type.
2653 : * Return <0, 0, or >0 according as arg1 <, =, or > arg2.
2654 : *
2655 : * Note: currently, the enumData cache is refreshed only if we are asked
2656 : * to compare an enum value that is not already in the cache. This is okay
2657 : * because there is no support for re-ordering existing values, so comparisons
2658 : * of previously cached values will return the right answer even if other
2659 : * values have been added since we last loaded the cache.
2660 : *
2661 : * Note: the enum logic has a special-case rule about even-numbered versus
2662 : * odd-numbered OIDs, but we take no account of that rule here; this
2663 : * routine shouldn't even get called when that rule applies.
2664 : */
2665 : int
2666 76213 : compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
2667 : {
2668 : TypeCacheEnumData *enumdata;
2669 : EnumItem *item1;
2670 : EnumItem *item2;
2671 :
2672 : /*
2673 : * Equal OIDs are certainly equal --- this case was probably handled by
2674 : * our caller, but we may as well check.
2675 : */
2676 76213 : if (arg1 == arg2)
2677 0 : return 0;
2678 :
2679 : /* Load up the cache if first time through */
2680 76213 : if (tcache->enumData == NULL)
2681 5 : load_enum_cache_data(tcache);
2682 76213 : enumdata = tcache->enumData;
2683 :
2684 : /*
2685 : * If both OIDs are known-sorted, we can just compare them directly.
2686 : */
2687 151929 : if (enum_known_sorted(enumdata, arg1) &&
2688 75716 : enum_known_sorted(enumdata, arg2))
2689 : {
2690 0 : if (arg1 < arg2)
2691 0 : return -1;
2692 : else
2693 0 : return 1;
2694 : }
2695 :
2696 : /*
2697 : * Slow path: we have to identify their actual sort-order positions.
2698 : */
2699 76213 : item1 = find_enumitem(enumdata, arg1);
2700 76213 : item2 = find_enumitem(enumdata, arg2);
2701 :
2702 76213 : if (item1 == NULL || item2 == NULL)
2703 : {
2704 : /*
2705 : * We couldn't find one or both values. That means the enum has
2706 : * changed under us, so re-initialize the cache and try again. We
2707 : * don't bother retrying the known-sorted case in this path.
2708 : */
2709 0 : load_enum_cache_data(tcache);
2710 0 : enumdata = tcache->enumData;
2711 :
2712 0 : item1 = find_enumitem(enumdata, arg1);
2713 0 : item2 = find_enumitem(enumdata, arg2);
2714 :
2715 : /*
2716 : * If we still can't find the values, complain: we must have corrupt
2717 : * data.
2718 : */
2719 0 : if (item1 == NULL)
2720 0 : elog(ERROR, "enum value %u not found in cache for enum %s",
2721 : arg1, format_type_be(tcache->type_id));
2722 0 : if (item2 == NULL)
2723 0 : elog(ERROR, "enum value %u not found in cache for enum %s",
2724 : arg2, format_type_be(tcache->type_id));
2725 : }
2726 :
2727 76213 : if (item1->sort_order < item2->sort_order)
2728 25768 : return -1;
2729 50445 : else if (item1->sort_order > item2->sort_order)
2730 50445 : return 1;
2731 : else
2732 0 : return 0;
2733 : }
2734 :
2735 : /*
2736 : * Load (or re-load) the enumData member of the typcache entry.
2737 : */
2738 : static void
2739 5 : load_enum_cache_data(TypeCacheEntry *tcache)
2740 : {
2741 : TypeCacheEnumData *enumdata;
2742 : Relation enum_rel;
2743 : SysScanDesc enum_scan;
2744 : HeapTuple enum_tuple;
2745 : ScanKeyData skey;
2746 : EnumItem *items;
2747 : int numitems;
2748 : int maxitems;
2749 : Oid bitmap_base;
2750 : Bitmapset *bitmap;
2751 : MemoryContext oldcxt;
2752 : int bm_size,
2753 : start_pos;
2754 :
2755 : /* Check that this is actually an enum */
2756 5 : if (tcache->typtype != TYPTYPE_ENUM)
2757 0 : ereport(ERROR,
2758 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2759 : errmsg("%s is not an enum",
2760 : format_type_be(tcache->type_id))));
2761 :
2762 : /*
2763 : * Read all the information for members of the enum type. We collect the
2764 : * info in working memory in the caller's context, and then transfer it to
2765 : * permanent memory in CacheMemoryContext. This minimizes the risk of
2766 : * leaking memory from CacheMemoryContext in the event of an error partway
2767 : * through.
2768 : */
2769 5 : maxitems = 64;
2770 5 : items = palloc_array(EnumItem, maxitems);
2771 5 : numitems = 0;
2772 :
2773 : /* Scan pg_enum for the members of the target enum type. */
2774 5 : ScanKeyInit(&skey,
2775 : Anum_pg_enum_enumtypid,
2776 : BTEqualStrategyNumber, F_OIDEQ,
2777 : ObjectIdGetDatum(tcache->type_id));
2778 :
2779 5 : enum_rel = table_open(EnumRelationId, AccessShareLock);
2780 5 : enum_scan = systable_beginscan(enum_rel,
2781 : EnumTypIdLabelIndexId,
2782 : true, NULL,
2783 : 1, &skey);
2784 :
2785 40 : while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
2786 : {
2787 35 : Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);
2788 :
2789 35 : if (numitems >= maxitems)
2790 : {
2791 0 : maxitems *= 2;
2792 0 : items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
2793 : }
2794 35 : items[numitems].enum_oid = en->oid;
2795 35 : items[numitems].sort_order = en->enumsortorder;
2796 35 : numitems++;
2797 : }
2798 :
2799 5 : systable_endscan(enum_scan);
2800 5 : table_close(enum_rel, AccessShareLock);
2801 :
2802 : /* Sort the items into OID order */
2803 5 : qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
2804 :
2805 : /*
2806 : * Here, we create a bitmap listing a subset of the enum's OIDs that are
2807 : * known to be in order and can thus be compared with just OID comparison.
2808 : *
2809 : * The point of this is that the enum's initial OIDs were certainly in
2810 : * order, so there is some subset that can be compared via OID comparison;
2811 : * and we'd rather not do binary searches unnecessarily.
2812 : *
2813 : * This is somewhat heuristic, and might identify a subset of OIDs that
2814 : * isn't exactly what the type started with. That's okay as long as the
2815 : * subset is correctly sorted.
2816 : */
2817 5 : bitmap_base = InvalidOid;
2818 5 : bitmap = NULL;
2819 5 : bm_size = 1; /* only save sets of at least 2 OIDs */
2820 :
2821 11 : for (start_pos = 0; start_pos < numitems - 1; start_pos++)
2822 : {
2823 : /*
2824 : * Identify longest sorted subsequence starting at start_pos
2825 : */
2826 11 : Bitmapset *this_bitmap = bms_make_singleton(0);
2827 11 : int this_bm_size = 1;
2828 11 : Oid start_oid = items[start_pos].enum_oid;
2829 11 : float4 prev_order = items[start_pos].sort_order;
2830 : int i;
2831 :
2832 74 : for (i = start_pos + 1; i < numitems; i++)
2833 : {
2834 : Oid offset;
2835 :
2836 63 : offset = items[i].enum_oid - start_oid;
2837 : /* quit if bitmap would be too large; cutoff is arbitrary */
2838 63 : if (offset >= 8192)
2839 0 : break;
2840 : /* include the item if it's in-order */
2841 63 : if (items[i].sort_order > prev_order)
2842 : {
2843 34 : prev_order = items[i].sort_order;
2844 34 : this_bitmap = bms_add_member(this_bitmap, (int) offset);
2845 34 : this_bm_size++;
2846 : }
2847 : }
2848 :
2849 : /* Remember it if larger than previous best */
2850 11 : if (this_bm_size > bm_size)
2851 : {
2852 5 : bms_free(bitmap);
2853 5 : bitmap_base = start_oid;
2854 5 : bitmap = this_bitmap;
2855 5 : bm_size = this_bm_size;
2856 : }
2857 : else
2858 6 : bms_free(this_bitmap);
2859 :
2860 : /*
2861 : * Done if it's not possible to find a longer sequence in the rest of
2862 : * the list. In typical cases this will happen on the first
2863 : * iteration, which is why we create the bitmaps on the fly instead of
2864 : * doing a second pass over the list.
2865 : */
2866 11 : if (bm_size >= (numitems - start_pos - 1))
2867 5 : break;
2868 : }
2869 :
2870 : /* OK, copy the data into CacheMemoryContext */
2871 5 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2872 : enumdata = (TypeCacheEnumData *)
2873 5 : palloc(offsetof(TypeCacheEnumData, enum_values) +
2874 5 : numitems * sizeof(EnumItem));
2875 5 : enumdata->bitmap_base = bitmap_base;
2876 5 : enumdata->sorted_values = bms_copy(bitmap);
2877 5 : enumdata->num_values = numitems;
2878 5 : memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
2879 5 : MemoryContextSwitchTo(oldcxt);
2880 :
2881 5 : pfree(items);
2882 5 : bms_free(bitmap);
2883 :
2884 : /* And link the finished cache struct into the typcache */
2885 5 : if (tcache->enumData != NULL)
2886 0 : pfree(tcache->enumData);
2887 5 : tcache->enumData = enumdata;
2888 5 : }
2889 :
2890 : /*
2891 : * Locate the EnumItem with the given OID, if present
2892 : */
2893 : static EnumItem *
2894 152426 : find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
2895 : {
2896 : EnumItem srch;
2897 :
2898 : /* On some versions of Solaris, bsearch of zero items dumps core */
2899 152426 : if (enumdata->num_values <= 0)
2900 0 : return NULL;
2901 :
2902 152426 : srch.enum_oid = arg;
2903 152426 : return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2904 : sizeof(EnumItem), enum_oid_cmp);
2905 : }
2906 :
2907 : /*
2908 : * qsort comparison function for OID-ordered EnumItems
2909 : */
2910 : static int
2911 307189 : enum_oid_cmp(const void *left, const void *right)
2912 : {
2913 307189 : const EnumItem *l = (const EnumItem *) left;
2914 307189 : const EnumItem *r = (const EnumItem *) right;
2915 :
2916 307189 : return pg_cmp_u32(l->enum_oid, r->enum_oid);
2917 : }
2918 :
2919 : /*
2920 : * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
2921 : * to the given value and return a dsa_pointer.
2922 : */
2923 : static dsa_pointer
2924 166 : share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
2925 : {
2926 : dsa_pointer shared_dp;
2927 : TupleDesc shared;
2928 :
2929 166 : shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2930 166 : shared = (TupleDesc) dsa_get_address(area, shared_dp);
2931 166 : TupleDescCopy(shared, tupdesc);
2932 166 : shared->tdtypmod = typmod;
2933 :
2934 166 : return shared_dp;
2935 : }
2936 :
2937 : /*
2938 : * If we are attached to a SharedRecordTypmodRegistry, use it to find or
2939 : * create a shared TupleDesc that matches 'tupdesc'. Otherwise return NULL.
2940 : * Tuple descriptors returned by this function are not reference counted, and
2941 : * will exist at least as long as the current backend remained attached to the
2942 : * current session.
2943 : */
2944 : static TupleDesc
2945 8853 : find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
2946 : {
2947 : TupleDesc result;
2948 : SharedRecordTableKey key;
2949 : SharedRecordTableEntry *record_table_entry;
2950 : SharedTypmodTableEntry *typmod_table_entry;
2951 : dsa_pointer shared_dp;
2952 : bool found;
2953 : uint32 typmod;
2954 :
2955 : /* If not even attached, nothing to do. */
2956 8853 : if (CurrentSession->shared_typmod_registry == NULL)
2957 8795 : return NULL;
2958 :
2959 : /* Try to find a matching tuple descriptor in the record table. */
2960 58 : key.shared = false;
2961 58 : key.u.local_tupdesc = tupdesc;
2962 : record_table_entry = (SharedRecordTableEntry *)
2963 58 : dshash_find(CurrentSession->shared_record_table, &key, false);
2964 58 : if (record_table_entry)
2965 : {
2966 : Assert(record_table_entry->key.shared);
2967 15 : dshash_release_lock(CurrentSession->shared_record_table,
2968 : record_table_entry);
2969 : result = (TupleDesc)
2970 15 : dsa_get_address(CurrentSession->area,
2971 : record_table_entry->key.u.shared_tupdesc);
2972 : Assert(result->tdrefcount == -1);
2973 :
2974 15 : return result;
2975 : }
2976 :
2977 : /* Allocate a new typmod number. This will be wasted if we error out. */
2978 43 : typmod = (int)
2979 43 : pg_atomic_fetch_add_u32(&CurrentSession->shared_typmod_registry->next_typmod,
2980 : 1);
2981 :
2982 : /* Copy the TupleDesc into shared memory. */
2983 43 : shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
2984 :
2985 : /*
2986 : * Create an entry in the typmod table so that others will understand this
2987 : * typmod number.
2988 : */
2989 43 : PG_TRY();
2990 : {
2991 : typmod_table_entry = (SharedTypmodTableEntry *)
2992 43 : dshash_find_or_insert(CurrentSession->shared_typmod_table,
2993 : &typmod, &found);
2994 43 : if (found)
2995 0 : elog(ERROR, "cannot create duplicate shared record typmod");
2996 : }
2997 0 : PG_CATCH();
2998 : {
2999 0 : dsa_free(CurrentSession->area, shared_dp);
3000 0 : PG_RE_THROW();
3001 : }
3002 43 : PG_END_TRY();
3003 43 : typmod_table_entry->typmod = typmod;
3004 43 : typmod_table_entry->shared_tupdesc = shared_dp;
3005 43 : dshash_release_lock(CurrentSession->shared_typmod_table,
3006 : typmod_table_entry);
3007 :
3008 : /*
3009 : * Finally create an entry in the record table so others with matching
3010 : * tuple descriptors can reuse the typmod.
3011 : */
3012 : record_table_entry = (SharedRecordTableEntry *)
3013 43 : dshash_find_or_insert(CurrentSession->shared_record_table, &key,
3014 : &found);
3015 43 : if (found)
3016 : {
3017 : /*
3018 : * Someone concurrently inserted a matching tuple descriptor since the
3019 : * first time we checked. Use that one instead.
3020 : */
3021 0 : dshash_release_lock(CurrentSession->shared_record_table,
3022 : record_table_entry);
3023 :
3024 : /* Might as well free up the space used by the one we created. */
3025 0 : found = dshash_delete_key(CurrentSession->shared_typmod_table,
3026 : &typmod);
3027 : Assert(found);
3028 0 : dsa_free(CurrentSession->area, shared_dp);
3029 :
3030 : /* Return the one we found. */
3031 : Assert(record_table_entry->key.shared);
3032 : result = (TupleDesc)
3033 0 : dsa_get_address(CurrentSession->area,
3034 : record_table_entry->key.u.shared_tupdesc);
3035 : Assert(result->tdrefcount == -1);
3036 :
3037 0 : return result;
3038 : }
3039 :
3040 : /* Store it and return it. */
3041 43 : record_table_entry->key.shared = true;
3042 43 : record_table_entry->key.u.shared_tupdesc = shared_dp;
3043 43 : dshash_release_lock(CurrentSession->shared_record_table,
3044 : record_table_entry);
3045 : result = (TupleDesc)
3046 43 : dsa_get_address(CurrentSession->area, shared_dp);
3047 : Assert(result->tdrefcount == -1);
3048 :
3049 43 : return result;
3050 : }
3051 :
3052 : /*
3053 : * On-DSM-detach hook to forget about the current shared record typmod
3054 : * infrastructure. This is currently used by both leader and workers.
3055 : */
3056 : static void
3057 1575 : shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
3058 : {
3059 : /* Be cautious here: maybe we didn't finish initializing. */
3060 1575 : if (CurrentSession->shared_record_table != NULL)
3061 : {
3062 1575 : dshash_detach(CurrentSession->shared_record_table);
3063 1575 : CurrentSession->shared_record_table = NULL;
3064 : }
3065 1575 : if (CurrentSession->shared_typmod_table != NULL)
3066 : {
3067 1575 : dshash_detach(CurrentSession->shared_typmod_table);
3068 1575 : CurrentSession->shared_typmod_table = NULL;
3069 : }
3070 1575 : CurrentSession->shared_typmod_registry = NULL;
3071 1575 : }
3072 :
3073 : /*
3074 : * Insert RelIdToTypeIdCacheHash entry if needed.
3075 : */
3076 : static void
3077 410279 : insert_rel_type_cache_if_needed(TypeCacheEntry *typentry)
3078 : {
3079 : /* Immediately quit for non-composite types */
3080 410279 : if (typentry->typtype != TYPTYPE_COMPOSITE)
3081 359075 : return;
3082 :
3083 : /* typrelid should be given for composite types */
3084 : Assert(OidIsValid(typentry->typrelid));
3085 :
3086 : /*
3087 : * Insert a RelIdToTypeIdCacheHash entry if the typentry have any
3088 : * information indicating it should be here.
3089 : */
3090 51204 : if ((typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) ||
3091 0 : (typentry->flags & TCFLAGS_OPERATOR_FLAGS) ||
3092 0 : typentry->tupDesc != NULL)
3093 : {
3094 : RelIdToTypeIdCacheEntry *relentry;
3095 : bool found;
3096 :
3097 51204 : relentry = (RelIdToTypeIdCacheEntry *) hash_search(RelIdToTypeIdCacheHash,
3098 51204 : &typentry->typrelid,
3099 : HASH_ENTER, &found);
3100 51204 : relentry->relid = typentry->typrelid;
3101 51204 : relentry->composite_typid = typentry->type_id;
3102 : }
3103 : }
3104 :
3105 : /*
3106 : * Delete entry RelIdToTypeIdCacheHash if needed after resetting of the
3107 : * TCFLAGS_HAVE_PG_TYPE_DATA flag, or any of TCFLAGS_OPERATOR_FLAGS,
3108 : * or tupDesc.
3109 : */
3110 : static void
3111 4818 : delete_rel_type_cache_if_needed(TypeCacheEntry *typentry)
3112 : {
3113 : #ifdef USE_ASSERT_CHECKING
3114 : int i;
3115 : bool is_in_progress = false;
3116 :
3117 : for (i = 0; i < in_progress_list_len; i++)
3118 : {
3119 : if (in_progress_list[i] == typentry->type_id)
3120 : {
3121 : is_in_progress = true;
3122 : break;
3123 : }
3124 : }
3125 : #endif
3126 :
3127 : /* Immediately quit for non-composite types */
3128 4818 : if (typentry->typtype != TYPTYPE_COMPOSITE)
3129 1940 : return;
3130 :
3131 : /* typrelid should be given for composite types */
3132 : Assert(OidIsValid(typentry->typrelid));
3133 :
3134 : /*
3135 : * Delete a RelIdToTypeIdCacheHash entry if the typentry doesn't have any
3136 : * information indicating entry should be still there.
3137 : */
3138 2878 : if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) &&
3139 1518 : !(typentry->flags & TCFLAGS_OPERATOR_FLAGS) &&
3140 1476 : typentry->tupDesc == NULL)
3141 : {
3142 : bool found;
3143 :
3144 1137 : (void) hash_search(RelIdToTypeIdCacheHash,
3145 1137 : &typentry->typrelid,
3146 : HASH_REMOVE, &found);
3147 : Assert(found || is_in_progress);
3148 : }
3149 : else
3150 : {
3151 : #ifdef USE_ASSERT_CHECKING
3152 : /*
3153 : * In assert-enabled builds otherwise check for RelIdToTypeIdCacheHash
3154 : * entry if it should exist.
3155 : */
3156 : bool found;
3157 :
3158 : if (!is_in_progress)
3159 : {
3160 : (void) hash_search(RelIdToTypeIdCacheHash,
3161 : &typentry->typrelid,
3162 : HASH_FIND, &found);
3163 : Assert(found);
3164 : }
3165 : #endif
3166 : }
3167 : }
3168 :
3169 : /*
3170 : * Add possibly missing RelIdToTypeId entries related to TypeCacheHash
3171 : * entries, marked as in-progress by lookup_type_cache(). It may happen
3172 : * in case of an error or interruption during the lookup_type_cache() call.
3173 : */
3174 : static void
3175 542114 : finalize_in_progress_typentries(void)
3176 : {
3177 : int i;
3178 :
3179 542115 : for (i = 0; i < in_progress_list_len; i++)
3180 : {
3181 : TypeCacheEntry *typentry;
3182 :
3183 1 : typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
3184 1 : &in_progress_list[i],
3185 : HASH_FIND, NULL);
3186 1 : if (typentry)
3187 1 : insert_rel_type_cache_if_needed(typentry);
3188 : }
3189 :
3190 542114 : in_progress_list_len = 0;
3191 542114 : }
3192 :
/*
 * AtEOXact_TypeCache
 *		End-of-transaction cleanup for the typcache: finalize any typcache
 *		entries left marked in-progress by lookup_type_cache(), then reset
 *		the in-progress list.
 */
void
AtEOXact_TypeCache(void)
{
	finalize_in_progress_typentries();
}
3198 :
/*
 * AtEOSubXact_TypeCache
 *		End-of-subtransaction cleanup for the typcache; same processing as
 *		AtEOXact_TypeCache.
 */
void
AtEOSubXact_TypeCache(void)
{
	finalize_in_progress_typentries();
}
|