/*-------------------------------------------------------------------------
 *
 * typcache.c
 *	  POSTGRES type cache code
 *
 * The type cache exists to speed lookup of certain information about data
 * types that is not directly available from a type's pg_type row.  For
 * example, we use a type's default btree opclass, or the default hash
 * opclass if no btree opclass exists, to determine which operators should
 * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
 *
 * Several seemingly-odd choices have been made to support use of the type
 * cache by generic array and record handling routines, such as array_eq(),
 * record_cmp(), and hash_array().  Because those routines are used as index
 * support operations, they cannot leak memory.  To allow them to execute
 * efficiently, all information that they would like to re-use across calls
 * is kept in the type cache.
 *
 * Once created, a type cache entry lives as long as the backend does, so
 * there is no need for a call to release a cache entry.  If the type is
 * dropped, the cache entry simply becomes wasted storage.  This is not
 * expected to happen often, and assuming that typcache entries are good
 * permanently allows caching pointers to them in long-lived places.
 *
 * We have some provisions for updating cache entries if the stored data
 * becomes obsolete.  Core data extracted from the pg_type row is updated
 * when we detect updates to pg_type.  Information dependent on opclasses is
 * cleared if we detect updates to pg_opclass.  We also support clearing the
 * tuple descriptor and operator/function parts of a rowtype's cache entry,
 * since those may need to change as a consequence of ALTER TABLE.  Domain
 * constraint changes are also tracked properly.
 *
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/utils/cache/typcache.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <limits.h>

#include "access/hash.h"
#include "access/htup_details.h"
#include "access/nbtree.h"
#include "access/parallel.h"
#include "access/relation.h"
#include "access/session.h"
#include "access/table.h"
#include "catalog/pg_am.h"
#include "catalog/pg_constraint.h"
#include "catalog/pg_enum.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_range.h"
#include "catalog/pg_type.h"
#include "commands/defrem.h"
#include "common/int.h"
#include "executor/executor.h"
#include "lib/dshash.h"
#include "optimizer/optimizer.h"
#include "port/pg_bitutils.h"
#include "storage/lwlock.h"
#include "utils/builtins.h"
#include "utils/catcache.h"
#include "utils/fmgroids.h"
#include "utils/injection_point.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/syscache.h"
#include "utils/typcache.h"


/* The main type cache hashtable searched by lookup_type_cache */
static HTAB *TypeCacheHash = NULL;

/*
 * The mapping of a relation's OID to the corresponding composite type OID.
 * We keep a map entry as long as the corresponding typentry has something
 * to clear, i.e. it has TCFLAGS_HAVE_PG_TYPE_DATA set, any of the
 * TCFLAGS_OPERATOR_FLAGS bits set, or a stored tupdesc.
 */
static HTAB *RelIdToTypeIdCacheHash = NULL;

typedef struct RelIdToTypeIdCacheEntry
{
    Oid         relid;          /* OID of the relation */
    Oid         composite_typid;    /* OID of the relation's composite type */
} RelIdToTypeIdCacheEntry;

/* List of type cache entries for domain types */
static TypeCacheEntry *firstDomainTypeEntry = NULL;

/* Private flag bits in the TypeCacheEntry.flags field */
#define TCFLAGS_HAVE_PG_TYPE_DATA           0x000001
#define TCFLAGS_CHECKED_BTREE_OPCLASS       0x000002
#define TCFLAGS_CHECKED_HASH_OPCLASS        0x000004
#define TCFLAGS_CHECKED_EQ_OPR              0x000008
#define TCFLAGS_CHECKED_LT_OPR              0x000010
#define TCFLAGS_CHECKED_GT_OPR              0x000020
#define TCFLAGS_CHECKED_CMP_PROC            0x000040
#define TCFLAGS_CHECKED_HASH_PROC           0x000080
#define TCFLAGS_CHECKED_HASH_EXTENDED_PROC  0x000100
#define TCFLAGS_CHECKED_ELEM_PROPERTIES     0x000200
#define TCFLAGS_HAVE_ELEM_EQUALITY          0x000400
#define TCFLAGS_HAVE_ELEM_COMPARE           0x000800
#define TCFLAGS_HAVE_ELEM_HASHING           0x001000
#define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING  0x002000
#define TCFLAGS_CHECKED_FIELD_PROPERTIES    0x004000
#define TCFLAGS_HAVE_FIELD_EQUALITY         0x008000
#define TCFLAGS_HAVE_FIELD_COMPARE          0x010000
#define TCFLAGS_HAVE_FIELD_HASHING          0x020000
#define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING 0x040000
#define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS  0x080000
#define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE    0x100000

/* The flags associated with equality/comparison/hashing are all but these: */
#define TCFLAGS_OPERATOR_FLAGS \
    (~(TCFLAGS_HAVE_PG_TYPE_DATA | \
       TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
       TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))
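
/*
 * Illustration only (hypothetical helper, not part of this file): the mask
 * above is typically used to decide whether a typentry still carries
 * anything worth clearing, which is also the rule stated above for keeping
 * RelIdToTypeIdCacheHash entries.  A minimal sketch:
 *
 *	static bool
 *	typentry_has_clearable_info(TypeCacheEntry *typentry)
 *	{
 *		return (typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) != 0 ||
 *			(typentry->flags & TCFLAGS_OPERATOR_FLAGS) != 0 ||
 *			typentry->tupDesc != NULL;
 *	}
 */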

/*
 * Data stored about a domain type's constraints.  Note that we do not create
 * this struct for the common case of a constraint-less domain; we just set
 * domainData to NULL to indicate that.
 *
 * Within a DomainConstraintCache, we store expression plan trees, but the
 * check_exprstate fields of the DomainConstraintState nodes are just NULL.
 * When needed, expression evaluation nodes are built by flat-copying the
 * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
 * Such a node tree is not part of the DomainConstraintCache, but is
 * considered to belong to a DomainConstraintRef.
 */
struct DomainConstraintCache
{
    List       *constraints;    /* list of DomainConstraintState nodes */
    MemoryContext dccContext;   /* memory context holding all associated data */
    long        dccRefCount;    /* number of references to this struct */
};

/* Private information to support comparisons of enum values */
typedef struct
{
    Oid         enum_oid;       /* OID of one enum value */
    float4      sort_order;     /* its sort position */
} EnumItem;

typedef struct TypeCacheEnumData
{
    Oid         bitmap_base;    /* OID corresponding to bit 0 of bitmapset */
    Bitmapset  *sorted_values;  /* Set of OIDs known to be in order */
    int         num_values;     /* total number of values in enum */
    EnumItem    enum_values[FLEXIBLE_ARRAY_MEMBER];
} TypeCacheEnumData;

/*
 * We use a separate table for storing the definitions of non-anonymous
 * record types.  Once defined, a record type will be remembered for the
 * life of the backend.  Subsequent uses of the "same" record type (where
 * sameness means equalRowTypes) will refer to the existing table entry.
 *
 * Stored record types are remembered in a linear array of TupleDescs,
 * which can be indexed quickly with the assigned typmod.  There is also
 * a hash table to speed searches for matching TupleDescs.
 */
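
/*
 * Illustration only (not part of this file): a typical way code outside the
 * typcache interacts with this registry is to "bless" an anonymous record's
 * TupleDesc, which assigns it a typmod here, and later resolve the
 * (RECORDOID, typmod) pair back to a descriptor.  A minimal sketch, using
 * the usual entry points declared in executor.h and typcache.h:
 *
 *	TupleDesc	blessed = BlessTupleDesc(tupdesc);	(assigns a typmod)
 *	int32		typmod = blessed->tdtypmod;
 *	...
 *	TupleDesc	found = lookup_rowtype_tupdesc(RECORDOID, typmod);
 *	... use 'found' ...
 *	ReleaseTupleDesc(found);
 */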

typedef struct RecordCacheEntry
{
    TupleDesc   tupdesc;
} RecordCacheEntry;

/*
 * To deal with non-anonymous record types that are exchanged by backends
 * involved in a parallel query, we also need a shared version of the above.
 */
struct SharedRecordTypmodRegistry
{
    /* A hash table for finding a matching TupleDesc. */
    dshash_table_handle record_table_handle;
    /* A hash table for finding a TupleDesc by typmod. */
    dshash_table_handle typmod_table_handle;
    /* A source of new record typmod numbers. */
    pg_atomic_uint32 next_typmod;
};

/*
 * When using shared tuple descriptors as hash table keys we need a way to be
 * able to search for an equal shared TupleDesc using a backend-local
 * TupleDesc.  So we use this type which can hold either, and hash and compare
 * functions that know how to handle both.
 */
typedef struct SharedRecordTableKey
{
    union
    {
        TupleDesc   local_tupdesc;
        dsa_pointer shared_tupdesc;
    }           u;
    bool        shared;
} SharedRecordTableKey;

/*
 * The shared version of RecordCacheEntry.  This lets us look up a typmod
 * using a TupleDesc which may be in local or shared memory.
 */
typedef struct SharedRecordTableEntry
{
    SharedRecordTableKey key;
} SharedRecordTableEntry;

/*
 * An entry in SharedRecordTypmodRegistry's typmod table.  This lets us look
 * up a TupleDesc in shared memory using a typmod.
 */
typedef struct SharedTypmodTableEntry
{
    uint32      typmod;
    dsa_pointer shared_tupdesc;
} SharedTypmodTableEntry;

static Oid *in_progress_list;
static int  in_progress_list_len;
static int  in_progress_list_maxlen;

/*
 * A comparator function for SharedRecordTableKey.
 */
static int
shared_record_table_compare(const void *a, const void *b, size_t size,
                            void *arg)
{
    dsa_area   *area = (dsa_area *) arg;
    SharedRecordTableKey *k1 = (SharedRecordTableKey *) a;
    SharedRecordTableKey *k2 = (SharedRecordTableKey *) b;
    TupleDesc   t1;
    TupleDesc   t2;

    if (k1->shared)
        t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
    else
        t1 = k1->u.local_tupdesc;

    if (k2->shared)
        t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
    else
        t2 = k2->u.local_tupdesc;

    return equalRowTypes(t1, t2) ? 0 : 1;
}

/*
 * A hash function for SharedRecordTableKey.
 */
static uint32
shared_record_table_hash(const void *a, size_t size, void *arg)
{
    dsa_area   *area = (dsa_area *) arg;
    SharedRecordTableKey *k = (SharedRecordTableKey *) a;
    TupleDesc   t;

    if (k->shared)
        t = (TupleDesc) dsa_get_address(area, k->u.shared_tupdesc);
    else
        t = k->u.local_tupdesc;

    return hashRowType(t);
}

/* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
static const dshash_parameters srtr_record_table_params = {
    sizeof(SharedRecordTableKey),   /* unused */
    sizeof(SharedRecordTableEntry),
    shared_record_table_compare,
    shared_record_table_hash,
    dshash_memcpy,
    LWTRANCHE_PER_SESSION_RECORD_TYPE
};

/* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
static const dshash_parameters srtr_typmod_table_params = {
    sizeof(uint32),
    sizeof(SharedTypmodTableEntry),
    dshash_memcmp,
    dshash_memhash,
    dshash_memcpy,
    LWTRANCHE_PER_SESSION_RECORD_TYPMOD
};

/* hashtable for recognizing registered record types */
static HTAB *RecordCacheHash = NULL;

typedef struct RecordCacheArrayEntry
{
    uint64      id;
    TupleDesc   tupdesc;
} RecordCacheArrayEntry;

/* array of info about registered record types, indexed by assigned typmod */
static RecordCacheArrayEntry *RecordCacheArray = NULL;
static int32 RecordCacheArrayLen = 0;   /* allocated length of above array */
static int32 NextRecordTypmod = 0;  /* number of entries used */

/*
 * Process-wide counter for generating unique tupledesc identifiers.
 * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
 * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
 */
static uint64 tupledesc_id_counter = INVALID_TUPLEDESC_IDENTIFIER;

static void load_typcache_tupdesc(TypeCacheEntry *typentry);
static void load_rangetype_info(TypeCacheEntry *typentry);
static void load_multirangetype_info(TypeCacheEntry *typentry);
static void load_domaintype_info(TypeCacheEntry *typentry);
static int  dcs_cmp(const void *a, const void *b);
static void decr_dcc_refcount(DomainConstraintCache *dcc);
static void dccref_deletion_callback(void *arg);
static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
static bool array_element_has_equality(TypeCacheEntry *typentry);
static bool array_element_has_compare(TypeCacheEntry *typentry);
static bool array_element_has_hashing(TypeCacheEntry *typentry);
static bool array_element_has_extended_hashing(TypeCacheEntry *typentry);
static void cache_array_element_properties(TypeCacheEntry *typentry);
static bool record_fields_have_equality(TypeCacheEntry *typentry);
static bool record_fields_have_compare(TypeCacheEntry *typentry);
static bool record_fields_have_hashing(TypeCacheEntry *typentry);
static bool record_fields_have_extended_hashing(TypeCacheEntry *typentry);
static void cache_record_field_properties(TypeCacheEntry *typentry);
static bool range_element_has_hashing(TypeCacheEntry *typentry);
static bool range_element_has_extended_hashing(TypeCacheEntry *typentry);
static void cache_range_element_properties(TypeCacheEntry *typentry);
static bool multirange_element_has_hashing(TypeCacheEntry *typentry);
static bool multirange_element_has_extended_hashing(TypeCacheEntry *typentry);
static void cache_multirange_element_properties(TypeCacheEntry *typentry);
static void TypeCacheRelCallback(Datum arg, Oid relid);
static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue);
static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
static void load_enum_cache_data(TypeCacheEntry *tcache);
static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
static int  enum_oid_cmp(const void *left, const void *right);
static void shared_record_typmod_registry_detach(dsm_segment *segment,
                                                 Datum datum);
static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc);
static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
                                   uint32 typmod);
static void insert_rel_type_cache_if_needed(TypeCacheEntry *typentry);
static void delete_rel_type_cache_if_needed(TypeCacheEntry *typentry);


/*
 * Hash function compatible with one-arg system cache hash function.
 */
static uint32
type_cache_syshash(const void *key, Size keysize)
{
    Assert(keysize == sizeof(Oid));
    return GetSysCacheHashValue1(TYPEOID, ObjectIdGetDatum(*(const Oid *) key));
}
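
/*
 * Illustration only (not part of this file): because TypeCacheHash shares
 * its hash function with the TYPEOID syscache, an invalidation callback that
 * is handed a syscache hash value can visit just the matching bucket instead
 * of scanning the whole table.  A minimal sketch, assuming 'hashvalue' was
 * delivered by CacheRegisterSyscacheCallback:
 *
 *	HASH_SEQ_STATUS status;
 *	TypeCacheEntry *typentry;
 *
 *	hash_seq_init_with_hash_value(&status, TypeCacheHash, hashvalue);
 *	while ((typentry = hash_seq_search(&status)) != NULL)
 *	{
 *		... mark typentry as needing reload ...
 *	}
 */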

/*
 * lookup_type_cache
 *
 * Fetch the type cache entry for the specified datatype, and make sure that
 * all the fields requested by bits in 'flags' are valid.
 *
 * The result is never NULL --- we will ereport() if the passed type OID is
 * invalid.  Note however that we may fail to find one or more of the
 * values requested by 'flags'; the caller needs to check whether the fields
 * are InvalidOid or not.
 *
 * Note that while filling a TypeCacheEntry we might process concurrent
 * invalidation messages, causing our not-yet-filled TypeCacheEntry to be
 * invalidated.  In this case, we typically only clear flags while the values
 * themselves remain available to the caller.  It's expected that the caller
 * holds enough locks on the objects the type depends on that the values
 * remain relevant.  It's also important that for TYPTYPE_COMPOSITE the
 * tupdesc is filled after all other TypeCacheEntry items, so the tupdesc
 * can't get invalidated during the lookup_type_cache() call.
 */
TypeCacheEntry *
lookup_type_cache(Oid type_id, int flags)
{
    TypeCacheEntry *typentry;
    bool        found;
    int         in_progress_offset;

    if (TypeCacheHash == NULL)
    {
        /* First time through: initialize the hash table */
        HASHCTL     ctl;
        int         allocsize;

        ctl.keysize = sizeof(Oid);
        ctl.entrysize = sizeof(TypeCacheEntry);

        /*
         * TypeCacheEntry takes its hash value from the system cache.  For
         * TypeCacheHash we use the same hash in order to speed up searches
         * by hash value.  This is used by hash_seq_init_with_hash_value().
         */
        ctl.hash = type_cache_syshash;

        TypeCacheHash = hash_create("Type information cache", 64,
                                    &ctl, HASH_ELEM | HASH_FUNCTION);

        Assert(RelIdToTypeIdCacheHash == NULL);

        ctl.keysize = sizeof(Oid);
        ctl.entrysize = sizeof(RelIdToTypeIdCacheEntry);
        RelIdToTypeIdCacheHash = hash_create("Map from relid to OID of cached composite type", 64,
                                             &ctl, HASH_ELEM | HASH_BLOBS);

        /* Also set up callbacks for SI invalidations */
        CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
        CacheRegisterSyscacheCallback(TYPEOID, TypeCacheTypCallback, (Datum) 0);
        CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
        CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);

        /* Also make sure CacheMemoryContext exists */
        if (!CacheMemoryContext)
            CreateCacheMemoryContext();

        /*
         * reserve enough in_progress_list slots for many cases
         */
        allocsize = 4;
        in_progress_list =
            MemoryContextAlloc(CacheMemoryContext,
                               allocsize * sizeof(*in_progress_list));
        in_progress_list_maxlen = allocsize;
    }

    Assert(TypeCacheHash != NULL && RelIdToTypeIdCacheHash != NULL);

    /* Register to catch invalidation messages */
    if (in_progress_list_len >= in_progress_list_maxlen)
    {
        int         allocsize;

        allocsize = in_progress_list_maxlen * 2;
        in_progress_list = repalloc(in_progress_list,
                                    allocsize * sizeof(*in_progress_list));
        in_progress_list_maxlen = allocsize;
    }
    in_progress_offset = in_progress_list_len++;
    in_progress_list[in_progress_offset] = type_id;

    /* Try to look up an existing entry */
    typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
                                              &type_id,
                                              HASH_FIND, NULL);
    if (typentry == NULL)
    {
        /*
         * If we didn't find one, we want to make one.  But first look up the
         * pg_type row, just to make sure we don't make a cache entry for an
         * invalid type OID.  If the type OID is not valid, present a
         * user-facing error, since some code paths such as domain_in() allow
         * this function to be reached with a user-supplied OID.
         */
        HeapTuple   tp;
        Form_pg_type typtup;

        tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
        if (!HeapTupleIsValid(tp))
            ereport(ERROR,
                    (errcode(ERRCODE_UNDEFINED_OBJECT),
                     errmsg("type with OID %u does not exist", type_id)));
        typtup = (Form_pg_type) GETSTRUCT(tp);
        if (!typtup->typisdefined)
            ereport(ERROR,
                    (errcode(ERRCODE_UNDEFINED_OBJECT),
                     errmsg("type \"%s\" is only a shell",
                            NameStr(typtup->typname))));

        /* Now make the typcache entry */
        typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
                                                  &type_id,
                                                  HASH_ENTER, &found);
        Assert(!found);         /* it wasn't there a moment ago */

        MemSet(typentry, 0, sizeof(TypeCacheEntry));

        /* These fields can never change, by definition */
        typentry->type_id = type_id;
        typentry->type_id_hash = get_hash_value(TypeCacheHash, &type_id);

        /* Keep this part in sync with the code below */
        typentry->typlen = typtup->typlen;
        typentry->typbyval = typtup->typbyval;
        typentry->typalign = typtup->typalign;
        typentry->typstorage = typtup->typstorage;
        typentry->typtype = typtup->typtype;
        typentry->typrelid = typtup->typrelid;
        typentry->typsubscript = typtup->typsubscript;
        typentry->typelem = typtup->typelem;
        typentry->typcollation = typtup->typcollation;
        typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;

        /* If it's a domain, immediately thread it into the domain cache list */
        if (typentry->typtype == TYPTYPE_DOMAIN)
        {
            typentry->nextDomain = firstDomainTypeEntry;
            firstDomainTypeEntry = typentry;
        }

        ReleaseSysCache(tp);
    }
    else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
    {
        /*
         * We have an entry, but its pg_type row got changed, so reload the
         * data obtained directly from pg_type.
         */
        HeapTuple   tp;
        Form_pg_type typtup;

        tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
        if (!HeapTupleIsValid(tp))
            ereport(ERROR,
                    (errcode(ERRCODE_UNDEFINED_OBJECT),
                     errmsg("type with OID %u does not exist", type_id)));
        typtup = (Form_pg_type) GETSTRUCT(tp);
        if (!typtup->typisdefined)
            ereport(ERROR,
                    (errcode(ERRCODE_UNDEFINED_OBJECT),
                     errmsg("type \"%s\" is only a shell",
                            NameStr(typtup->typname))));

        /*
         * Keep this part in sync with the code above.  Many of these fields
         * shouldn't ever change, particularly typtype, but copy 'em anyway.
         */
        typentry->typlen = typtup->typlen;
        typentry->typbyval = typtup->typbyval;
        typentry->typalign = typtup->typalign;
        typentry->typstorage = typtup->typstorage;
        typentry->typtype = typtup->typtype;
        typentry->typrelid = typtup->typrelid;
        typentry->typsubscript = typtup->typsubscript;
        typentry->typelem = typtup->typelem;
        typentry->typcollation = typtup->typcollation;
        typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;

        ReleaseSysCache(tp);
    }

    /*
     * Look up opclasses if we haven't already and any dependent info is
     * requested.
     */
    if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR | TYPECACHE_GT_OPR |
                  TYPECACHE_CMP_PROC |
                  TYPECACHE_EQ_OPR_FINFO | TYPECACHE_CMP_PROC_FINFO |
                  TYPECACHE_BTREE_OPFAMILY)) &&
        !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
    {
        Oid         opclass;

        opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
        if (OidIsValid(opclass))
        {
            typentry->btree_opf = get_opclass_family(opclass);
            typentry->btree_opintype = get_opclass_input_type(opclass);
        }
        else
        {
            typentry->btree_opf = typentry->btree_opintype = InvalidOid;
        }

        /*
         * Reset information derived from btree opclass.  Note in particular
         * that we'll redetermine the eq_opr even if we previously found one;
         * this matters in case a btree opclass has been added to a type that
         * previously had only a hash opclass.
         */
        typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
                             TCFLAGS_CHECKED_LT_OPR |
                             TCFLAGS_CHECKED_GT_OPR |
                             TCFLAGS_CHECKED_CMP_PROC);
        typentry->flags |= TCFLAGS_CHECKED_BTREE_OPCLASS;
    }

    /*
     * If we need to look up equality operator, and there's no btree opclass,
     * force lookup of hash opclass.
     */
    if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
        !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
        typentry->btree_opf == InvalidOid)
        flags |= TYPECACHE_HASH_OPFAMILY;

    if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO |
                  TYPECACHE_HASH_EXTENDED_PROC |
                  TYPECACHE_HASH_EXTENDED_PROC_FINFO |
                  TYPECACHE_HASH_OPFAMILY)) &&
        !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
    {
        Oid         opclass;

        opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
        if (OidIsValid(opclass))
        {
            typentry->hash_opf = get_opclass_family(opclass);
            typentry->hash_opintype = get_opclass_input_type(opclass);
        }
        else
        {
            typentry->hash_opf = typentry->hash_opintype = InvalidOid;
        }

        /*
         * Reset information derived from hash opclass.  We do *not* reset the
         * eq_opr; if we already found one from the btree opclass, that
         * decision is still good.
         */
        typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
                             TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
        typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
    }

    /*
     * Look for requested operators and functions, if we haven't already.
     */
    if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
        !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
    {
        Oid         eq_opr = InvalidOid;

        if (typentry->btree_opf != InvalidOid)
            eq_opr = get_opfamily_member(typentry->btree_opf,
                                         typentry->btree_opintype,
                                         typentry->btree_opintype,
                                         BTEqualStrategyNumber);
        if (eq_opr == InvalidOid &&
            typentry->hash_opf != InvalidOid)
            eq_opr = get_opfamily_member(typentry->hash_opf,
                                         typentry->hash_opintype,
                                         typentry->hash_opintype,
                                         HTEqualStrategyNumber);

        /*
         * If the proposed equality operator is array_eq or record_eq, check
         * to see if the element type or column types support equality.  If
         * not, array_eq or record_eq would fail at runtime, so we don't want
         * to report that the type has equality.  (We can omit similar
         * checking for ranges and multiranges because ranges can't be created
         * in the first place unless their subtypes support equality.)
         */
        if (eq_opr == ARRAY_EQ_OP &&
            !array_element_has_equality(typentry))
            eq_opr = InvalidOid;
        else if (eq_opr == RECORD_EQ_OP &&
                 !record_fields_have_equality(typentry))
            eq_opr = InvalidOid;

        /* Force update of eq_opr_finfo only if we're changing state */
        if (typentry->eq_opr != eq_opr)
            typentry->eq_opr_finfo.fn_oid = InvalidOid;

        typentry->eq_opr = eq_opr;

        /*
         * Reset info about hash functions whenever we pick up new info about
         * equality operator.  This is so we can ensure that the hash
         * functions match the operator.
         */
        typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
                             TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
        typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
    }
    if ((flags & TYPECACHE_LT_OPR) &&
        !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
    {
        Oid         lt_opr = InvalidOid;

        if (typentry->btree_opf != InvalidOid)
            lt_opr = get_opfamily_member(typentry->btree_opf,
                                         typentry->btree_opintype,
                                         typentry->btree_opintype,
                                         BTLessStrategyNumber);

        /*
         * As above, make sure array_cmp or record_cmp will succeed; but again
         * we need no special check for ranges or multiranges.
         */
        if (lt_opr == ARRAY_LT_OP &&
            !array_element_has_compare(typentry))
            lt_opr = InvalidOid;
        else if (lt_opr == RECORD_LT_OP &&
                 !record_fields_have_compare(typentry))
            lt_opr = InvalidOid;

        typentry->lt_opr = lt_opr;
        typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
    }
    if ((flags & TYPECACHE_GT_OPR) &&
        !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
    {
        Oid         gt_opr = InvalidOid;

        if (typentry->btree_opf != InvalidOid)
            gt_opr = get_opfamily_member(typentry->btree_opf,
                                         typentry->btree_opintype,
                                         typentry->btree_opintype,
                                         BTGreaterStrategyNumber);

        /*
         * As above, make sure array_cmp or record_cmp will succeed; but again
         * we need no special check for ranges or multiranges.
         */
        if (gt_opr == ARRAY_GT_OP &&
            !array_element_has_compare(typentry))
            gt_opr = InvalidOid;
        else if (gt_opr == RECORD_GT_OP &&
                 !record_fields_have_compare(typentry))
            gt_opr = InvalidOid;

        typentry->gt_opr = gt_opr;
        typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
    }
    if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
        !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
    {
        Oid         cmp_proc = InvalidOid;

        if (typentry->btree_opf != InvalidOid)
            cmp_proc = get_opfamily_proc(typentry->btree_opf,
                                         typentry->btree_opintype,
                                         typentry->btree_opintype,
                                         BTORDER_PROC);

        /*
         * As above, make sure array_cmp or record_cmp will succeed; but again
         * we need no special check for ranges or multiranges.
         */
        if (cmp_proc == F_BTARRAYCMP &&
            !array_element_has_compare(typentry))
            cmp_proc = InvalidOid;
        else if (cmp_proc == F_BTRECORDCMP &&
                 !record_fields_have_compare(typentry))
            cmp_proc = InvalidOid;

        /* Force update of cmp_proc_finfo only if we're changing state */
        if (typentry->cmp_proc != cmp_proc)
            typentry->cmp_proc_finfo.fn_oid = InvalidOid;

        typentry->cmp_proc = cmp_proc;
        typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
    }
    if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) &&
        !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
    {
        Oid         hash_proc = InvalidOid;

        /*
         * We insist that the eq_opr, if one has been determined, match the
         * hash opclass; else report there is no hash function.
         */
        if (typentry->hash_opf != InvalidOid &&
            (!OidIsValid(typentry->eq_opr) ||
             typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
                                                     typentry->hash_opintype,
                                                     typentry->hash_opintype,
                                                     HTEqualStrategyNumber)))
            hash_proc = get_opfamily_proc(typentry->hash_opf,
                                          typentry->hash_opintype,
                                          typentry->hash_opintype,
                                          HASHSTANDARD_PROC);

        /*
         * As above, make sure hash_array, hash_record, or hash_range will
         * succeed.
         */
        if (hash_proc == F_HASH_ARRAY &&
            !array_element_has_hashing(typentry))
            hash_proc = InvalidOid;
        else if (hash_proc == F_HASH_RECORD &&
                 !record_fields_have_hashing(typentry))
            hash_proc = InvalidOid;
        else if (hash_proc == F_HASH_RANGE &&
                 !range_element_has_hashing(typentry))
            hash_proc = InvalidOid;

        /*
         * Likewise for hash_multirange.
         */
        if (hash_proc == F_HASH_MULTIRANGE &&
            !multirange_element_has_hashing(typentry))
            hash_proc = InvalidOid;

        /* Force update of hash_proc_finfo only if we're changing state */
        if (typentry->hash_proc != hash_proc)
            typentry->hash_proc_finfo.fn_oid = InvalidOid;

        typentry->hash_proc = hash_proc;
        typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
    }
    if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
                  TYPECACHE_HASH_EXTENDED_PROC_FINFO)) &&
        !(typentry->flags & TCFLAGS_CHECKED_HASH_EXTENDED_PROC))
    {
        Oid         hash_extended_proc = InvalidOid;

        /*
         * We insist that the eq_opr, if one has been determined, match the
         * hash opclass; else report there is no hash function.
         */
        if (typentry->hash_opf != InvalidOid &&
            (!OidIsValid(typentry->eq_opr) ||
             typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
                                                     typentry->hash_opintype,
                                                     typentry->hash_opintype,
                                                     HTEqualStrategyNumber)))
            hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
                                                   typentry->hash_opintype,
                                                   typentry->hash_opintype,
                                                   HASHEXTENDED_PROC);

        /*
         * As above, make sure hash_array_extended, hash_record_extended, or
         * hash_range_extended will succeed.
         */
        if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
            !array_element_has_extended_hashing(typentry))
            hash_extended_proc = InvalidOid;
        else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
                 !record_fields_have_extended_hashing(typentry))
            hash_extended_proc = InvalidOid;
        else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
                 !range_element_has_extended_hashing(typentry))
            hash_extended_proc = InvalidOid;

        /*
         * Likewise for hash_multirange_extended.
         */
        if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
            !multirange_element_has_extended_hashing(typentry))
            hash_extended_proc = InvalidOid;

        /* Force update of proc finfo only if we're changing state */
        if (typentry->hash_extended_proc != hash_extended_proc)
            typentry->hash_extended_proc_finfo.fn_oid = InvalidOid;

        typentry->hash_extended_proc = hash_extended_proc;
        typentry->flags |= TCFLAGS_CHECKED_HASH_EXTENDED_PROC;
    }

    /*
     * Set up fmgr lookup info as requested
     *
     * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
     * which is not quite right (they're really in the hash table's private
     * memory context) but this will do for our purposes.
     *
     * Note: the code above avoids invalidating the finfo structs unless the
     * referenced operator/function OID actually changes.  This is to prevent
     * unnecessary leakage of any subsidiary data attached to an finfo, since
     * that would cause session-lifespan memory leaks.
     */
    if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
        typentry->eq_opr_finfo.fn_oid == InvalidOid &&
        typentry->eq_opr != InvalidOid)
    {
        Oid         eq_opr_func;

        eq_opr_func = get_opcode(typentry->eq_opr);
        if (eq_opr_func != InvalidOid)
            fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
                          CacheMemoryContext);
    }
    if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
        typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
        typentry->cmp_proc != InvalidOid)
    {
        fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
                      CacheMemoryContext);
    }
    if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
        typentry->hash_proc_finfo.fn_oid == InvalidOid &&
        typentry->hash_proc != InvalidOid)
    {
        fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
                      CacheMemoryContext);
    }
    if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) &&
        typentry->hash_extended_proc_finfo.fn_oid == InvalidOid &&
        typentry->hash_extended_proc != InvalidOid)
    {
        fmgr_info_cxt(typentry->hash_extended_proc,
                      &typentry->hash_extended_proc_finfo,
                      CacheMemoryContext);
    }

    /*
     * If it's a composite type (row type), get tupdesc if requested
     */
    if ((flags & TYPECACHE_TUPDESC) &&
        typentry->tupDesc == NULL &&
        typentry->typtype == TYPTYPE_COMPOSITE)
    {
        load_typcache_tupdesc(typentry);
    }

    /*
     * If requested, get information about a range type
     *
     * This includes making sure that the basic info about the range element
     * type is up-to-date.
     */
    if ((flags & TYPECACHE_RANGE_INFO) &&
        typentry->typtype == TYPTYPE_RANGE)
    {
        if (typentry->rngelemtype == NULL)
            load_rangetype_info(typentry);
        else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
            (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
    }

    /*
     * If requested, get information about a multirange type
     */
    if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
        typentry->rngtype == NULL &&
        typentry->typtype == TYPTYPE_MULTIRANGE)
    {
        load_multirangetype_info(typentry);
    }

    /*
     * If requested, get information about a domain type
     */
    if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
        typentry->domainBaseType == InvalidOid &&
        typentry->typtype == TYPTYPE_DOMAIN)
    {
        typentry->domainBaseTypmod = -1;
        typentry->domainBaseType =
            getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
    }
    if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
        (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
        typentry->typtype == TYPTYPE_DOMAIN)
    {
        load_domaintype_info(typentry);
    }

    INJECTION_POINT("typecache-before-rel-type-cache-insert");

    Assert(in_progress_offset + 1 == in_progress_list_len);
    in_progress_list_len--;

    insert_rel_type_cache_if_needed(typentry);

    return typentry;
}
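
/*
 * Illustration only (hypothetical caller, not part of this file): request
 * all the pieces you need in one call, then verify they were found, since a
 * perfectly valid type may simply lack, say, an ordering operator.  A
 * minimal sketch:
 *
 *	TypeCacheEntry *typentry;
 *
 *	typentry = lookup_type_cache(type_id,
 *								 TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR);
 *	if (!OidIsValid(typentry->lt_opr))
 *		ereport(ERROR,
 *				(errcode(ERRCODE_UNDEFINED_FUNCTION),
 *				 errmsg("could not identify an ordering operator for type %s",
 *						format_type_be(type_id))));
 */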

/*
 * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
 */
static void
load_typcache_tupdesc(TypeCacheEntry *typentry)
{
    Relation    rel;

    if (!OidIsValid(typentry->typrelid))    /* should not happen */
        elog(ERROR, "invalid typrelid for composite type %u",
             typentry->type_id);
    rel = relation_open(typentry->typrelid, AccessShareLock);
    Assert(rel->rd_rel->reltype == typentry->type_id);

    /*
     * Link to the tupdesc and increment its refcount (we assert it's a
     * refcounted descriptor).  We don't use IncrTupleDescRefCount() for this,
     * because the reference mustn't be entered in the current resource owner;
     * it can outlive the current query.
     */
    typentry->tupDesc = RelationGetDescr(rel);

    Assert(typentry->tupDesc->tdrefcount > 0);
    typentry->tupDesc->tdrefcount++;

    /*
     * In future, we could take some pains to not change tupDesc_identifier if
     * the tupdesc didn't really change; but for now it's not worth it.
     */
    typentry->tupDesc_identifier = ++tupledesc_id_counter;

    relation_close(rel, AccessShareLock);
}

/*
 * load_rangetype_info --- helper routine to set up range type information
 */
static void
load_rangetype_info(TypeCacheEntry *typentry)
{
    Form_pg_range pg_range;
    HeapTuple   tup;
    Oid         subtypeOid;
    Oid         opclassOid;
    Oid         canonicalOid;
    Oid         subdiffOid;
    Oid         opfamilyOid;
    Oid         opcintype;
    Oid         cmpFnOid;

    /* get information from pg_range */
    tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id));
    /* should not fail, since we already checked typtype ... */
    if (!HeapTupleIsValid(tup))
        elog(ERROR, "cache lookup failed for range type %u",
             typentry->type_id);
    pg_range = (Form_pg_range) GETSTRUCT(tup);

    subtypeOid = pg_range->rngsubtype;
    typentry->rng_collation = pg_range->rngcollation;
    opclassOid = pg_range->rngsubopc;
    canonicalOid = pg_range->rngcanonical;
    subdiffOid = pg_range->rngsubdiff;

    ReleaseSysCache(tup);

    /* get opclass properties and look up the comparison function */
    opfamilyOid = get_opclass_family(opclassOid);
    opcintype = get_opclass_input_type(opclassOid);
    typentry->rng_opfamily = opfamilyOid;

    cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
                                 BTORDER_PROC);
    if (!RegProcedureIsValid(cmpFnOid))
        elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
             BTORDER_PROC, opcintype, opcintype, opfamilyOid);

    /* set up cached fmgrinfo structs */
    fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
                  CacheMemoryContext);
    if (OidIsValid(canonicalOid))
        fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
                      CacheMemoryContext);
    if (OidIsValid(subdiffOid))
        fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
                      CacheMemoryContext);

    /* Lastly, set up link to the element type --- this marks data valid */
    typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
}

/*
 * load_multirangetype_info --- helper routine to set up multirange type
 * information
 */
static void
load_multirangetype_info(TypeCacheEntry *typentry)
{
    Oid         rangetypeOid;

    rangetypeOid = get_multirange_range(typentry->type_id);
    if (!OidIsValid(rangetypeOid))
        elog(ERROR, "cache lookup failed for multirange type %u",
             typentry->type_id);

    typentry->rngtype = lookup_type_cache(rangetypeOid, TYPECACHE_RANGE_INFO);
}

/*
 * load_domaintype_info --- helper routine to set up domain constraint info
 *
 * Note: we assume we're called in a relatively short-lived context, so it's
 * okay to leak data into the current context while scanning pg_constraint.
 * We build the new DomainConstraintCache data in a context underneath
 * CurrentMemoryContext, and reparent it under CacheMemoryContext when
 * complete.
 */
static void
load_domaintype_info(TypeCacheEntry *typentry)
{
    Oid         typeOid = typentry->type_id;
    DomainConstraintCache *dcc;
    bool        notNull = false;
    DomainConstraintState **ccons;
    int         cconslen;
    Relation    conRel;
    MemoryContext oldcxt;

    /*
     * If we're here, any existing constraint info is stale, so release it.
     * For safety, be sure to null the link before trying to delete the data.
     */
    if (typentry->domainData)
    {
        dcc = typentry->domainData;
        typentry->domainData = NULL;
        decr_dcc_refcount(dcc);
    }

    /*
     * We try to optimize the common case of no domain constraints, so don't
     * create the dcc object and context until we find a constraint.  Likewise
     * for the temp sorting array.
     */
    dcc = NULL;
    ccons = NULL;
    cconslen = 0;

    /*
     * Scan pg_constraint for relevant constraints.  We want to find
     * constraints for not just this domain, but any ancestor domains, so the
     * outer loop crawls up the domain stack.
     */
    conRel = table_open(ConstraintRelationId, AccessShareLock);

    for (;;)
    {
        HeapTuple   tup;
        HeapTuple   conTup;
        Form_pg_type typTup;
        int         nccons = 0;
        ScanKeyData key[1];
        SysScanDesc scan;

        tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
        if (!HeapTupleIsValid(tup))
            elog(ERROR, "cache lookup failed for type %u", typeOid);
        typTup = (Form_pg_type) GETSTRUCT(tup);

        if (typTup->typtype != TYPTYPE_DOMAIN)
        {
            /* Not a domain, so done */
            ReleaseSysCache(tup);
            break;
        }

        /* Test for NOT NULL Constraint */
        if (typTup->typnotnull)
            notNull = true;

        /* Look for CHECK Constraints on this domain */
        ScanKeyInit(&key[0],
                    Anum_pg_constraint_contypid,
                    BTEqualStrategyNumber, F_OIDEQ,
                    ObjectIdGetDatum(typeOid));

        scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
                                  NULL, 1, key);

        while (HeapTupleIsValid(conTup = systable_getnext(scan)))
        {
            Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
            Datum       val;
            bool        isNull;
            char       *constring;
            Expr       *check_expr;
            DomainConstraintState *r;

            /* Ignore non-CHECK constraints */
            if (c->contype != CONSTRAINT_CHECK)
                continue;

            /* Not expecting conbin to be NULL, but we'll test for it anyway */
            val = fastgetattr(conTup, Anum_pg_constraint_conbin,
                              conRel->rd_att, &isNull);
            if (isNull)
                elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
                     NameStr(typTup->typname), NameStr(c->conname));

            /* Convert conbin to C string in caller context */
            constring = TextDatumGetCString(val);

            /* Create the DomainConstraintCache object and context if needed */
            if (dcc == NULL)
            {
                MemoryContext cxt;

                cxt = AllocSetContextCreate(CurrentMemoryContext,
                                            "Domain constraints",
                                            ALLOCSET_SMALL_SIZES);
                dcc = (DomainConstraintCache *)
                    MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
                dcc->constraints = NIL;
                dcc->dccContext = cxt;
                dcc->dccRefCount = 0;
            }

            /* Create node trees in DomainConstraintCache's context */
            oldcxt = MemoryContextSwitchTo(dcc->dccContext);

            check_expr = (Expr *) stringToNode(constring);

            /*
             * Plan the expression, since ExecInitExpr will expect that.
             *
             * Note: caching the result of expression_planner() is not very
             * good practice.  Ideally we'd use a CachedExpression here so
             * that we would react promptly to, eg, changes in inlined
             * functions.  However, because we don't support mutable domain
             * CHECK constraints, it's not really clear that it's worth the
             * extra overhead to do that.
             */
            check_expr = expression_planner(check_expr);

            r = makeNode(DomainConstraintState);
            r->constrainttype = DOM_CONSTRAINT_CHECK;
            r->name = pstrdup(NameStr(c->conname));
            r->check_expr = check_expr;
            r->check_exprstate = NULL;

            MemoryContextSwitchTo(oldcxt);

            /* Accumulate constraints in an array, for sorting below */
            if (ccons == NULL)
            {
                cconslen = 8;
                ccons = (DomainConstraintState **)
                    palloc(cconslen * sizeof(DomainConstraintState *));
            }
            else if (nccons >= cconslen)
            {
                cconslen *= 2;
                ccons = (DomainConstraintState **)
                    repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
            }
            ccons[nccons++] = r;
        }

        systable_endscan(scan);

        if (nccons > 0)
        {
            /*
             * Sort the items for this domain, so that CHECKs are applied in a
             * deterministic order.
             */
            if (nccons > 1)
                qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);

            /*
             * Now attach them to the overall list.  Use lcons() here because
             * constraints of parent domains should be applied earlier.
             */
            oldcxt = MemoryContextSwitchTo(dcc->dccContext);
            while (nccons > 0)
                dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
            MemoryContextSwitchTo(oldcxt);
        }

        /* loop to next domain in stack */
        typeOid = typTup->typbasetype;
        ReleaseSysCache(tup);
    }

    table_close(conRel, AccessShareLock);

    /*
     * Only need to add one NOT NULL check regardless of how many domains in
     * the stack request it.
     */
    if (notNull)
    {
        DomainConstraintState *r;

        /* Create the DomainConstraintCache object and context if needed */
        if (dcc == NULL)
        {
            MemoryContext cxt;

            cxt = AllocSetContextCreate(CurrentMemoryContext,
                                        "Domain constraints",
                                        ALLOCSET_SMALL_SIZES);
            dcc = (DomainConstraintCache *)
                MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
            dcc->constraints = NIL;
            dcc->dccContext = cxt;
            dcc->dccRefCount = 0;
        }

        /* Create node trees in DomainConstraintCache's context */
        oldcxt = MemoryContextSwitchTo(dcc->dccContext);

        r = makeNode(DomainConstraintState);

        r->constrainttype = DOM_CONSTRAINT_NOTNULL;
        r->name = pstrdup("NOT NULL");
        r->check_expr = NULL;
        r->check_exprstate = NULL;

        /* lcons to apply the nullness check FIRST */
        dcc->constraints = lcons(r, dcc->constraints);

        MemoryContextSwitchTo(oldcxt);
    }

    /*
     * If we made a constraint object, move it into CacheMemoryContext and
     * attach it to the typcache entry.
     */
    if (dcc)
    {
        MemoryContextSetParent(dcc->dccContext, CacheMemoryContext);
        typentry->domainData = dcc;
        dcc->dccRefCount++;     /* count the typcache's reference */
    }

    /* Either way, the typcache entry's domain data is now valid. */
    typentry->flags |= TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
}

/*
 * qsort comparator to sort DomainConstraintState pointers by name
 */
static int
dcs_cmp(const void *a, const void *b)
{
    const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
    const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;

    return strcmp((*ca)->name, (*cb)->name);
}

/*
 * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
 * and free it if no references remain
 */
static void
decr_dcc_refcount(DomainConstraintCache *dcc)
{
    Assert(dcc->dccRefCount > 0);
    if (--(dcc->dccRefCount) <= 0)
        MemoryContextDelete(dcc->dccContext);
}

/*
 * Context reset/delete callback for a DomainConstraintRef
 */
static void
dccref_deletion_callback(void *arg)
{
    DomainConstraintRef *ref = (DomainConstraintRef *) arg;
    DomainConstraintCache *dcc = ref->dcc;

    /* Paranoia --- be sure link is nulled before trying to release */
    if (dcc)
    {
        ref->constraints = NIL;
        ref->dcc = NULL;
        decr_dcc_refcount(dcc);
    }
}

/*
 * prep_domain_constraints --- prepare domain constraints for execution
 *
 * The expression trees stored in the DomainConstraintCache's list are
 * converted to executable expression state trees stored in execctx.
 */
static List *
prep_domain_constraints(List *constraints, MemoryContext execctx)
{
    List       *result = NIL;
    MemoryContext oldcxt;
    ListCell   *lc;

    oldcxt = MemoryContextSwitchTo(execctx);

    foreach(lc, constraints)
    {
        DomainConstraintState *r = (DomainConstraintState *) lfirst(lc);
        DomainConstraintState *newr;

        newr = makeNode(DomainConstraintState);
        newr->constrainttype = r->constrainttype;
        newr->name = r->name;
        newr->check_expr = r->check_expr;
        newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);

        result = lappend(result, newr);
    }

    MemoryContextSwitchTo(oldcxt);

    return result;
}

/*
 * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
 *
 * Caller must tell us the MemoryContext in which the DomainConstraintRef
 * lives.  The ref will be cleaned up when that context is reset/deleted.
 *
 * Caller must also tell us whether it wants check_exprstate fields to be
 * computed in the DomainConstraintState nodes attached to this ref.
 * If it doesn't, we need not make a copy of the DomainConstraintState list.
 */
void
InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref,
                        MemoryContext refctx, bool need_exprstate)
{
    /* Look up the typcache entry --- we assume it survives indefinitely */
    ref->tcache = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
    ref->need_exprstate = need_exprstate;
    /* For safety, establish the callback before acquiring a refcount */
    ref->refctx = refctx;
    ref->dcc = NULL;
    ref->callback.func = dccref_deletion_callback;
    ref->callback.arg = (void *) ref;
    MemoryContextRegisterResetCallback(refctx, &ref->callback);
    /* Acquire refcount if there are constraints, and set up exported list */
    if (ref->tcache->domainData)
    {
        ref->dcc = ref->tcache->domainData;
        ref->dcc->dccRefCount++;
        if (ref->need_exprstate)
            ref->constraints = prep_domain_constraints(ref->dcc->constraints,
                                                       ref->refctx);
        else
            ref->constraints = ref->dcc->constraints;
    }
    else
        ref->constraints = NIL;
}

/*
 * UpdateDomainConstraintRef --- recheck validity of domain constraint info
 *
 * If the domain's constraint set changed, ref->constraints is updated to
 * point at a new list of cached constraints.
 *
 * In the normal case where nothing happened to the domain, this is cheap
 * enough that it's reasonable (and expected) to check before *each* use
 * of the constraint info.
 */
void
UpdateDomainConstraintRef(DomainConstraintRef *ref)
{
    TypeCacheEntry *typentry = ref->tcache;

    /* Make sure typcache entry's data is up to date */
    if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
        typentry->typtype == TYPTYPE_DOMAIN)
        load_domaintype_info(typentry);

    /* Transfer to ref object if there's new info, adjusting refcounts */
    if (ref->dcc != typentry->domainData)
    {
        /* Paranoia --- be sure link is nulled before trying to release */
        DomainConstraintCache *dcc = ref->dcc;

        if (dcc)
        {
            /*
             * Note: we just leak the previous list of executable domain
             * constraints.  Alternatively, we could keep those in a child
             * context of ref->refctx and free that context at this point.
             * However, in practice this code path will be taken so seldom
             * that the extra bookkeeping for a child context doesn't seem
             * worthwhile; we'll just allow a leak for the lifespan of refctx.
             */
            ref->constraints = NIL;
            ref->dcc = NULL;
            decr_dcc_refcount(dcc);
        }
        dcc = typentry->domainData;
        if (dcc)
        {
            ref->dcc = dcc;
            dcc->dccRefCount++;
            if (ref->need_exprstate)
                ref->constraints = prep_domain_constraints(dcc->constraints,
                                                           ref->refctx);
            else
                ref->constraints = dcc->constraints;
        }
    }
}
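
/*
 * Illustration only (not part of this file): the intended usage pattern for
 * the two functions above, modeled loosely on domain_check() in
 * utils/adt/domains.c.  Initialize the ref once in a sufficiently
 * long-lived context, then re-validate it before each use:
 *
 *	DomainConstraintRef ref;
 *	ListCell   *lc;
 *
 *	InitDomainConstraintRef(domainOid, &ref, CurrentMemoryContext, true);
 *	...
 *	UpdateDomainConstraintRef(&ref);	(cheap if nothing changed)
 *	foreach(lc, ref.constraints)
 *	{
 *		DomainConstraintState *con = (DomainConstraintState *) lfirst(lc);
 *
 *		... evaluate con->check_exprstate against the datum ...
 *	}
 */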
1480 :
1481 : /*
1482 : * DomainHasConstraints --- utility routine to check if a domain has constraints
1483 : *
1484 : * This is defined to return false, not fail, if type is not a domain.
1485 : */
1486 : bool
1487 29230 : DomainHasConstraints(Oid type_id)
1488 : {
1489 : TypeCacheEntry *typentry;
1490 :
1491 : /*
1492 : * Note: a side effect is to cause the typcache's domain data to become
1493 : * valid. This is fine since we'll likely need it soon if there is any.
1494 : */
1495 29230 : typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
1496 :
1497 29230 : return (typentry->domainData != NULL);
1498 : }
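/*
 * Usage sketch (hypothetical caller): the typical reason to call this is to
 * bypass the DomainConstraintRef machinery entirely for unconstrained
 * domains:
 *
 *     if (!DomainHasConstraints(domain_oid))
 *         return value;    -- nothing to check; pass the datum through
 */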
1499 :
1500 :
1501 : /*
1502 : * array_element_has_equality and friends are helper routines to check
1503 : * whether we should believe that array_eq and related functions will work
1504 : * on the given array type or composite type.
1505 : *
1506 : * The logic above may call these repeatedly on the same type entry, so we
1507 : * make use of the typentry->flags field to cache the results once known.
1508 : * Also, we assume that we'll probably want all these facts about the type
1509 : * if we want any, so we cache them all using only one lookup of the
1510 : * component datatype(s).
1511 : */
1512 :
1513 : static bool
1514 2420 : array_element_has_equality(TypeCacheEntry *typentry)
1515 : {
1516 2420 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1517 2096 : cache_array_element_properties(typentry);
1518 2420 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1519 : }
1520 :
1521 : static bool
1522 4480 : array_element_has_compare(TypeCacheEntry *typentry)
1523 : {
1524 4480 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1525 370 : cache_array_element_properties(typentry);
1526 4480 : return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1527 : }
1528 :
1529 : static bool
1530 1634 : array_element_has_hashing(TypeCacheEntry *typentry)
1531 : {
1532 1634 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1533 0 : cache_array_element_properties(typentry);
1534 1634 : return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1535 : }
1536 :
1537 : static bool
1538 296 : array_element_has_extended_hashing(TypeCacheEntry *typentry)
1539 : {
1540 296 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1541 0 : cache_array_element_properties(typentry);
1542 296 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1543 : }
1544 :
1545 : static void
1546 2466 : cache_array_element_properties(TypeCacheEntry *typentry)
1547 : {
1548 2466 : Oid elem_type = get_base_element_type(typentry->type_id);
1549 :
1550 2466 : if (OidIsValid(elem_type))
1551 : {
1552 : TypeCacheEntry *elementry;
1553 :
1554 2310 : elementry = lookup_type_cache(elem_type,
1555 : TYPECACHE_EQ_OPR |
1556 : TYPECACHE_CMP_PROC |
1557 : TYPECACHE_HASH_PROC |
1558 : TYPECACHE_HASH_EXTENDED_PROC);
1559 2310 : if (OidIsValid(elementry->eq_opr))
1560 2168 : typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1561 2310 : if (OidIsValid(elementry->cmp_proc))
1562 2016 : typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1563 2310 : if (OidIsValid(elementry->hash_proc))
1564 2156 : typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1565 2310 : if (OidIsValid(elementry->hash_extended_proc))
1566 2156 : typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1567 : }
1568 2466 : typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1569 2466 : }
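/*
 * Worked example (illustrative): for int4[], the element type int4 has
 * default btree and hash opclasses, so one lookup_type_cache() call sets
 * all four TCFLAGS_HAVE_ELEM_* bits at once; for xml[], whose element type
 * has no equality operator at all, none are set, and array_eq() on such an
 * array fails if attempted. Either way the answer is computed once and
 * then served from typentry->flags by the helpers above.
 */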
1570 :
1571 : /*
1572 : * Likewise, some helper functions for composite types.
1573 : */
1574 :
1575 : static bool
1576 398 : record_fields_have_equality(TypeCacheEntry *typentry)
1577 : {
1578 398 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1579 370 : cache_record_field_properties(typentry);
1580 398 : return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1581 : }
1582 :
1583 : static bool
1584 468 : record_fields_have_compare(TypeCacheEntry *typentry)
1585 : {
1586 468 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1587 60 : cache_record_field_properties(typentry);
1588 468 : return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1589 : }
1590 :
1591 : static bool
1592 382 : record_fields_have_hashing(TypeCacheEntry *typentry)
1593 : {
1594 382 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1595 6 : cache_record_field_properties(typentry);
1596 382 : return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
1597 : }
1598 :
1599 : static bool
1600 156 : record_fields_have_extended_hashing(TypeCacheEntry *typentry)
1601 : {
1602 156 : if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1603 0 : cache_record_field_properties(typentry);
1604 156 : return (typentry->flags & TCFLAGS_HAVE_FIELD_EXTENDED_HASHING) != 0;
1605 : }
1606 :
1607 : static void
1608 436 : cache_record_field_properties(TypeCacheEntry *typentry)
1609 : {
1610 : /*
1611 : * For type RECORD, we can't really tell what will work, since we don't
1612 : * have access here to the specific anonymous type. Just assume that
1613 : * equality and comparison will (we may get a failure at runtime). We
1614 : * could also claim that hashing works, but then if code that has the
1615 : * option between a comparison-based (sort-based) and a hash-based plan
1616 : * chooses hashing, stuff could fail that would otherwise work if it chose
1617 : * a comparison-based plan. In practice more types support comparison
1618 : * than hashing.
1619 : */
1620 436 : if (typentry->type_id == RECORDOID)
1621 : {
1622 44 : typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
1623 : TCFLAGS_HAVE_FIELD_COMPARE);
1624 : }
1625 392 : else if (typentry->typtype == TYPTYPE_COMPOSITE)
1626 : {
1627 : TupleDesc tupdesc;
1628 : int newflags;
1629 : int i;
1630 :
1631 : /* Fetch composite type's tupdesc if we don't have it already */
1632 392 : if (typentry->tupDesc == NULL)
1633 214 : load_typcache_tupdesc(typentry);
1634 392 : tupdesc = typentry->tupDesc;
1635 :
1636 : /* Must bump the refcount while we do additional catalog lookups */
1637 392 : IncrTupleDescRefCount(tupdesc);
1638 :
1639 : /* Have each property if all non-dropped fields have the property */
1640 392 : newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
1641 : TCFLAGS_HAVE_FIELD_COMPARE |
1642 : TCFLAGS_HAVE_FIELD_HASHING |
1643 : TCFLAGS_HAVE_FIELD_EXTENDED_HASHING);
1644 4664 : for (i = 0; i < tupdesc->natts; i++)
1645 : {
1646 : TypeCacheEntry *fieldentry;
1647 4432 : Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
1648 :
1649 4432 : if (attr->attisdropped)
1650 0 : continue;
1651 :
1652 4432 : fieldentry = lookup_type_cache(attr->atttypid,
1653 : TYPECACHE_EQ_OPR |
1654 : TYPECACHE_CMP_PROC |
1655 : TYPECACHE_HASH_PROC |
1656 : TYPECACHE_HASH_EXTENDED_PROC);
1657 4432 : if (!OidIsValid(fieldentry->eq_opr))
1658 160 : newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
1659 4432 : if (!OidIsValid(fieldentry->cmp_proc))
1660 160 : newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
1661 4432 : if (!OidIsValid(fieldentry->hash_proc))
1662 166 : newflags &= ~TCFLAGS_HAVE_FIELD_HASHING;
1663 4432 : if (!OidIsValid(fieldentry->hash_extended_proc))
1664 166 : newflags &= ~TCFLAGS_HAVE_FIELD_EXTENDED_HASHING;
1665 :
1666 : /* We can drop out of the loop once we disprove all bits */
1667 4432 : if (newflags == 0)
1668 160 : break;
1669 : }
1670 392 : typentry->flags |= newflags;
1671 :
1672 392 : DecrTupleDescRefCount(tupdesc);
1673 : }
1674 0 : else if (typentry->typtype == TYPTYPE_DOMAIN)
1675 : {
1676 : /* If it's domain over composite, copy base type's properties */
1677 : TypeCacheEntry *baseentry;
1678 :
1679 : /* load up basetype info if we didn't already */
1680 0 : if (typentry->domainBaseType == InvalidOid)
1681 : {
1682 0 : typentry->domainBaseTypmod = -1;
1683 0 : typentry->domainBaseType =
1684 0 : getBaseTypeAndTypmod(typentry->type_id,
1685 : &typentry->domainBaseTypmod);
1686 : }
1687 0 : baseentry = lookup_type_cache(typentry->domainBaseType,
1688 : TYPECACHE_EQ_OPR |
1689 : TYPECACHE_CMP_PROC |
1690 : TYPECACHE_HASH_PROC |
1691 : TYPECACHE_HASH_EXTENDED_PROC);
1692 0 : if (baseentry->typtype == TYPTYPE_COMPOSITE)
1693 : {
1694 0 : typentry->flags |= TCFLAGS_DOMAIN_BASE_IS_COMPOSITE;
1695 0 : typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
1696 : TCFLAGS_HAVE_FIELD_COMPARE |
1697 : TCFLAGS_HAVE_FIELD_HASHING |
1698 : TCFLAGS_HAVE_FIELD_EXTENDED_HASHING);
1699 : }
1700 : }
1701 436 : typentry->flags |= TCFLAGS_CHECKED_FIELD_PROPERTIES;
1702 436 : }
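/*
 * Worked example (illustrative): for a composite type (a int4, b point),
 * the int4 field supports all four properties, but point has no default
 * btree or hash opclass and no default equality operator, so all four
 * HAVE_FIELD_* bits are cleared when that field is examined and the loop
 * exits early. Callers then treat the row type as neither sortable nor
 * hashable.
 */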
1703 :
1704 : /*
1705 : * Likewise, some helper functions for range and multirange types.
1706 : *
1707 : * We can borrow the flag bits for array element properties to use for range
1708 : * element properties, since those flag bits otherwise have no use in a
1709 : * range or multirange type's typcache entry.
1710 : */
1711 :
1712 : static bool
1713 32 : range_element_has_hashing(TypeCacheEntry *typentry)
1714 : {
1715 32 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1716 32 : cache_range_element_properties(typentry);
1717 32 : return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1718 : }
1719 :
1720 : static bool
1721 0 : range_element_has_extended_hashing(TypeCacheEntry *typentry)
1722 : {
1723 0 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1724 0 : cache_range_element_properties(typentry);
1725 0 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1726 : }
1727 :
1728 : static void
1729 32 : cache_range_element_properties(TypeCacheEntry *typentry)
1730 : {
1731 : /* load up subtype link if we didn't already */
1732 32 : if (typentry->rngelemtype == NULL &&
1733 0 : typentry->typtype == TYPTYPE_RANGE)
1734 0 : load_rangetype_info(typentry);
1735 :
1736 32 : if (typentry->rngelemtype != NULL)
1737 : {
1738 : TypeCacheEntry *elementry;
1739 :
1740 : /* might need to calculate subtype's hash function properties */
1741 32 : elementry = lookup_type_cache(typentry->rngelemtype->type_id,
1742 : TYPECACHE_HASH_PROC |
1743 : TYPECACHE_HASH_EXTENDED_PROC);
1744 32 : if (OidIsValid(elementry->hash_proc))
1745 26 : typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1746 32 : if (OidIsValid(elementry->hash_extended_proc))
1747 26 : typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1748 : }
1749 32 : typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1750 32 : }
1751 :
1752 : static bool
1753 18 : multirange_element_has_hashing(TypeCacheEntry *typentry)
1754 : {
1755 18 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1756 18 : cache_multirange_element_properties(typentry);
1757 18 : return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1758 : }
1759 :
1760 : static bool
1761 0 : multirange_element_has_extended_hashing(TypeCacheEntry *typentry)
1762 : {
1763 0 : if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1764 0 : cache_multirange_element_properties(typentry);
1765 0 : return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
1766 : }
1767 :
1768 : static void
1769 18 : cache_multirange_element_properties(TypeCacheEntry *typentry)
1770 : {
1771 : /* load up range link if we didn't already */
1772 18 : if (typentry->rngtype == NULL &&
1773 0 : typentry->typtype == TYPTYPE_MULTIRANGE)
1774 0 : load_multirangetype_info(typentry);
1775 :
1776 18 : if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
1777 : {
1778 : TypeCacheEntry *elementry;
1779 :
1780 : /* might need to calculate subtype's hash function properties */
1781 18 : elementry = lookup_type_cache(typentry->rngtype->rngelemtype->type_id,
1782 : TYPECACHE_HASH_PROC |
1783 : TYPECACHE_HASH_EXTENDED_PROC);
1784 18 : if (OidIsValid(elementry->hash_proc))
1785 12 : typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1786 18 : if (OidIsValid(elementry->hash_extended_proc))
1787 12 : typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
1788 : }
1789 18 : typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1790 18 : }
1791 :
1792 : /*
1793 : * Make sure that RecordCacheArray is large enough to store
1794 : * 'typmod'.
1795 : */
1796 : static void
1797 14858 : ensure_record_cache_typmod_slot_exists(int32 typmod)
1798 : {
1799 14858 : if (RecordCacheArray == NULL)
1800 : {
1801 5810 : RecordCacheArray = (RecordCacheArrayEntry *)
1802 5810 : MemoryContextAllocZero(CacheMemoryContext,
1803 : 64 * sizeof(RecordCacheArrayEntry));
1804 5810 : RecordCacheArrayLen = 64;
1805 : }
1806 :
1807 14858 : if (typmod >= RecordCacheArrayLen)
1808 : {
1809 0 : int32 newlen = pg_nextpower2_32(typmod + 1);
1810 :
1811 0 : RecordCacheArray = repalloc0_array(RecordCacheArray,
1812 : RecordCacheArrayEntry,
1813 : RecordCacheArrayLen,
1814 : newlen);
1815 0 : RecordCacheArrayLen = newlen;
1816 : }
1817 14858 : }
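/*
 * Growth arithmetic (illustrative numbers): the array starts at 64
 * zero-initialized slots. A later request for typmod 200 computes
 * pg_nextpower2_32(201) == 256, so the array is repalloc0'd from 64 to 256
 * entries and every typmod in [0, 255] then has a slot.
 */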
1818 :
1819 : /*
1820 : * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1821 : *
1822 : * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1823 : * hasn't had its refcount bumped.
1824 : */
1825 : static TupleDesc
1826 121158 : lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
1827 : {
1828 121158 : if (type_id != RECORDOID)
1829 : {
1830 : /*
1831 : * It's a named composite type, so use the regular typcache.
1832 : */
1833 : TypeCacheEntry *typentry;
1834 :
1835 56370 : typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1836 56368 : if (typentry->tupDesc == NULL && !noError)
1837 0 : ereport(ERROR,
1838 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1839 : errmsg("type %s is not composite",
1840 : format_type_be(type_id))));
1841 56368 : return typentry->tupDesc;
1842 : }
1843 : else
1844 : {
1845 : /*
1846 : * It's a transient record type, so look in our record-type table.
1847 : */
1848 64788 : if (typmod >= 0)
1849 : {
1850 : /* Is it already in our local cache? */
1851 64772 : if (typmod < RecordCacheArrayLen &&
1852 64766 : RecordCacheArray[typmod].tupdesc != NULL)
1853 64742 : return RecordCacheArray[typmod].tupdesc;
1854 :
1855 : /* Are we attached to a shared record typmod registry? */
1856 30 : if (CurrentSession->shared_typmod_registry != NULL)
1857 : {
1858 : SharedTypmodTableEntry *entry;
1859 :
1860 : /* Try to find it in the shared typmod index. */
1861 30 : entry = dshash_find(CurrentSession->shared_typmod_table,
1862 : &typmod, false);
1863 30 : if (entry != NULL)
1864 : {
1865 : TupleDesc tupdesc;
1866 :
1867 : tupdesc = (TupleDesc)
1868 30 : dsa_get_address(CurrentSession->area,
1869 : entry->shared_tupdesc);
1870 : Assert(typmod == tupdesc->tdtypmod);
1871 :
1872 : /* We may need to extend the local RecordCacheArray. */
1873 30 : ensure_record_cache_typmod_slot_exists(typmod);
1874 :
1875 : /*
1876 : * Our local array can now point directly to the TupleDesc
1877 : * in shared memory, which is non-reference-counted.
1878 : */
1879 30 : RecordCacheArray[typmod].tupdesc = tupdesc;
1880 : Assert(tupdesc->tdrefcount == -1);
1881 :
1882 : /*
1883 : * We don't share tupdesc identifiers across processes, so
1884 : * assign one locally.
1885 : */
1886 30 : RecordCacheArray[typmod].id = ++tupledesc_id_counter;
1887 :
1888 30 : dshash_release_lock(CurrentSession->shared_typmod_table,
1889 : entry);
1890 :
1891 30 : return RecordCacheArray[typmod].tupdesc;
1892 : }
1893 : }
1894 : }
1895 :
1896 16 : if (!noError)
1897 0 : ereport(ERROR,
1898 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1899 : errmsg("record type has not been registered")));
1900 16 : return NULL;
1901 : }
1902 : }
1903 :
1904 : /*
1905 : * lookup_rowtype_tupdesc
1906 : *
1907 : * Given a typeid/typmod that should describe a known composite type,
1908 : * return the tuple descriptor for the type. Will ereport on failure.
1909 : * (Use ereport because this is reachable with user-specified OIDs,
1910 : * for example from record_in().)
1911 : *
1912 : * Note: on success, we increment the refcount of the returned TupleDesc,
1913 : * and log the reference in CurrentResourceOwner. Caller must call
1914 : * ReleaseTupleDesc when done using the tupdesc. (There are some
1915 : * cases in which the returned tupdesc is not refcounted, in which
1916 : * case PinTupleDesc/ReleaseTupleDesc are no-ops; but in these cases
1917 : * the tupdesc is guaranteed to live till process exit.)
1918 : */
1919 : TupleDesc
1920 72108 : lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
1921 : {
1922 : TupleDesc tupDesc;
1923 :
1924 72108 : tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1925 72106 : PinTupleDesc(tupDesc);
1926 72106 : return tupDesc;
1927 : }
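/*
 * Usage sketch (hypothetical caller) of the refcount contract noted above:
 *
 *     TupleDesc tupdesc = lookup_rowtype_tupdesc(typid, typmod);
 *
 *     -- inspect tupdesc->natts, TupleDescAttr(tupdesc, i), etc.
 *     ReleaseTupleDesc(tupdesc);
 */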
1928 :
1929 : /*
1930 : * lookup_rowtype_tupdesc_noerror
1931 : *
1932 : * As above, but if the type is not a known composite type and noError
1933 : * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1934 : * type_id is passed, you'll get an ereport anyway.)
1935 : */
1936 : TupleDesc
1937 20 : lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1938 : {
1939 : TupleDesc tupDesc;
1940 :
1941 20 : tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1942 20 : if (tupDesc != NULL)
1943 20 : PinTupleDesc(tupDesc);
1944 20 : return tupDesc;
1945 : }
1946 :
1947 : /*
1948 : * lookup_rowtype_tupdesc_copy
1949 : *
1950 : * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1951 : * copied into the CurrentMemoryContext and is not reference-counted.
1952 : */
1953 : TupleDesc
1954 49012 : lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
1955 : {
1956 : TupleDesc tmp;
1957 :
1958 49012 : tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1959 49012 : return CreateTupleDescCopyConstr(tmp);
1960 : }
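/*
 * Usage note (sketch): since the copy lives in CurrentMemoryContext and is
 * not reference-counted, no ReleaseTupleDesc call is needed; the descriptor
 * simply vanishes when that context is reset or deleted.
 */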
1961 :
1962 : /*
1963 : * lookup_rowtype_tupdesc_domain
1964 : *
1965 : * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
1966 : * a domain over a named composite type; so this is effectively equivalent to
1967 : * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
1968 : * except for being a tad faster.
1969 : *
1970 : * Note: the reason we don't fold the look-through-domain behavior into plain
1971 : * lookup_rowtype_tupdesc() is that we want callers to know they might be
1972 : * dealing with a domain. Otherwise they might construct a tuple that should
1973 : * be of the domain type, but not apply domain constraints.
1974 : */
1975 : TupleDesc
1976 2220 : lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
1977 : {
1978 : TupleDesc tupDesc;
1979 :
1980 2220 : if (type_id != RECORDOID)
1981 : {
1982 : /*
1983 : * Check for domain or named composite type. We might as well load
1984 : * whichever data is needed.
1985 : */
1986 : TypeCacheEntry *typentry;
1987 :
1988 2202 : typentry = lookup_type_cache(type_id,
1989 : TYPECACHE_TUPDESC |
1990 : TYPECACHE_DOMAIN_BASE_INFO);
1991 2202 : if (typentry->typtype == TYPTYPE_DOMAIN)
1992 20 : return lookup_rowtype_tupdesc_noerror(typentry->domainBaseType,
1993 : typentry->domainBaseTypmod,
1994 : noError);
1995 2182 : if (typentry->tupDesc == NULL && !noError)
1996 0 : ereport(ERROR,
1997 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1998 : errmsg("type %s is not composite",
1999 : format_type_be(type_id))));
2000 2182 : tupDesc = typentry->tupDesc;
2001 : }
2002 : else
2003 18 : tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
2004 2200 : if (tupDesc != NULL)
2005 2184 : PinTupleDesc(tupDesc);
2006 2200 : return tupDesc;
2007 : }
2008 :
2009 : /*
2010 : * Hash function for the hash table of RecordCacheEntry.
2011 : */
2012 : static uint32
2013 343896 : record_type_typmod_hash(const void *data, size_t size)
2014 : {
2015 343896 : RecordCacheEntry *entry = (RecordCacheEntry *) data;
2016 :
2017 343896 : return hashRowType(entry->tupdesc);
2018 : }
2019 :
2020 : /*
2021 : * Match function for the hash table of RecordCacheEntry.
2022 : */
2023 : static int
2024 317816 : record_type_typmod_compare(const void *a, const void *b, size_t size)
2025 : {
2026 317816 : RecordCacheEntry *left = (RecordCacheEntry *) a;
2027 317816 : RecordCacheEntry *right = (RecordCacheEntry *) b;
2028 :
2029 317816 : return equalRowTypes(left->tupdesc, right->tupdesc) ? 0 : 1;
2030 : }
2031 :
2032 : /*
2033 : * assign_record_type_typmod
2034 : *
2035 : * Given a tuple descriptor for a RECORD type, find or create a cache entry
2036 : * for the type, and set the tupdesc's tdtypmod field to a value that will
2037 : * identify this cache entry to lookup_rowtype_tupdesc.
2038 : */
2039 : void
2040 329068 : assign_record_type_typmod(TupleDesc tupDesc)
2041 : {
2042 : RecordCacheEntry *recentry;
2043 : TupleDesc entDesc;
2044 : bool found;
2045 : MemoryContext oldcxt;
2046 :
2047 : Assert(tupDesc->tdtypeid == RECORDOID);
2048 :
2049 329068 : if (RecordCacheHash == NULL)
2050 : {
2051 : /* First time through: initialize the hash table */
2052 : HASHCTL ctl;
2053 :
2054 5810 : ctl.keysize = sizeof(TupleDesc); /* just the pointer */
2055 5810 : ctl.entrysize = sizeof(RecordCacheEntry);
2056 5810 : ctl.hash = record_type_typmod_hash;
2057 5810 : ctl.match = record_type_typmod_compare;
2058 5810 : RecordCacheHash = hash_create("Record information cache", 64,
2059 : &ctl,
2060 : HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);
2061 :
2062 : /* Also make sure CacheMemoryContext exists */
2063 5810 : if (!CacheMemoryContext)
2064 0 : CreateCacheMemoryContext();
2065 : }
2066 :
2067 : /*
2068 : * Find a hashtable entry for this tuple descriptor. We don't use
2069 : * HASH_ENTER yet, because if it's missing, we need to make sure that all
2070 : * the allocations succeed before we create the new entry.
2071 : */
2072 329068 : recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
2073 : &tupDesc,
2074 : HASH_FIND, &found);
2075 329068 : if (found && recentry->tupdesc != NULL)
2076 : {
2077 314240 : tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
2078 314240 : return;
2079 : }
2080 :
2081 : /* Not present, so need to manufacture an entry */
2082 14828 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2083 :
2084 : /* Look in the SharedRecordTypmodRegistry, if attached */
2085 14828 : entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
2086 14828 : if (entDesc == NULL)
2087 : {
2088 : /*
2089 : * Make sure we have room before we CreateTupleDescCopy() or advance
2090 : * NextRecordTypmod.
2091 : */
2092 14758 : ensure_record_cache_typmod_slot_exists(NextRecordTypmod);
2093 :
2094 : /* Reference-counted local cache only. */
2095 14758 : entDesc = CreateTupleDescCopy(tupDesc);
2096 14758 : entDesc->tdrefcount = 1;
2097 14758 : entDesc->tdtypmod = NextRecordTypmod++;
2098 : }
2099 : else
2100 : {
2101 70 : ensure_record_cache_typmod_slot_exists(entDesc->tdtypmod);
2102 : }
2103 :
2104 14828 : RecordCacheArray[entDesc->tdtypmod].tupdesc = entDesc;
2105 :
2106 : /* Assign a unique tupdesc identifier, too. */
2107 14828 : RecordCacheArray[entDesc->tdtypmod].id = ++tupledesc_id_counter;
2108 :
2109 : /* Fully initialized; create the hash table entry */
2110 14828 : recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
2111 : &tupDesc,
2112 : HASH_ENTER, NULL);
2113 14828 : recentry->tupdesc = entDesc;
2114 :
2115 : /* Update the caller's tuple descriptor. */
2116 14828 : tupDesc->tdtypmod = entDesc->tdtypmod;
2117 :
2118 14828 : MemoryContextSwitchTo(oldcxt);
2119 : }
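/*
 * Usage sketch (hypothetical caller): code fabricating a transient record
 * type registers it so the tuples can be decoded later from just the
 * (type OID, typmod) pair:
 *
 *     TupleDesc desc = CreateTemplateTupleDesc(2);
 *
 *     TupleDescInitEntry(desc, (AttrNumber) 1, "a", INT4OID, -1, 0);
 *     TupleDescInitEntry(desc, (AttrNumber) 2, "b", TEXTOID, -1, 0);
 *     BlessTupleDesc(desc);    -- calls assign_record_type_typmod internally
 *
 * Afterwards, lookup_rowtype_tupdesc(RECORDOID, desc->tdtypmod) finds the
 * descriptor again.
 */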
2120 :
2121 : /*
2122 : * assign_record_type_identifier
2123 : *
2124 : * Get an identifier, which will be unique over the lifespan of this backend
2125 : * process, for the current tuple descriptor of the specified composite type.
2126 : * For named composite types, the value is guaranteed to change if the type's
2127 : * definition does. For registered RECORD types, the value will not change
2128 : * once assigned, since the registered type won't either. If an anonymous
2129 : * RECORD type is specified, we return a new identifier on each call.
2130 : */
2131 : uint64
2132 5478 : assign_record_type_identifier(Oid type_id, int32 typmod)
2133 : {
2134 5478 : if (type_id != RECORDOID)
2135 : {
2136 : /*
2137 : * It's a named composite type, so use the regular typcache.
2138 : */
2139 : TypeCacheEntry *typentry;
2140 :
2141 0 : typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
2142 0 : if (typentry->tupDesc == NULL)
2143 0 : ereport(ERROR,
2144 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2145 : errmsg("type %s is not composite",
2146 : format_type_be(type_id))));
2147 : Assert(typentry->tupDesc_identifier != 0);
2148 0 : return typentry->tupDesc_identifier;
2149 : }
2150 : else
2151 : {
2152 : /*
2153 : * It's a transient record type, so look in our record-type table.
2154 : */
2155 5478 : if (typmod >= 0 && typmod < RecordCacheArrayLen &&
2156 60 : RecordCacheArray[typmod].tupdesc != NULL)
2157 : {
2158 : Assert(RecordCacheArray[typmod].id != 0);
2159 60 : return RecordCacheArray[typmod].id;
2160 : }
2161 :
2162 : /* For anonymous or unrecognized record type, generate a new ID */
2163 5418 : return ++tupledesc_id_counter;
2164 : }
2165 : }
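/*
 * Usage sketch (hypothetical caller): cached per-rowtype state can be
 * revalidated cheaply by re-checking the identifier before each use.
 * "mystate" here is an assumed caller-side struct, not defined in this
 * file:
 *
 *     uint64 cur_id = assign_record_type_identifier(typid, typmod);
 *
 *     if (cur_id != mystate->tupdesc_id)
 *     {
 *         -- rebuild cached per-column state ...
 *         mystate->tupdesc_id = cur_id;
 *     }
 */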
2166 :
2167 : /*
2168 : * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
2169 : * This exists only to avoid exposing private innards of
2170 : * SharedRecordTypmodRegistry in a header.
2171 : */
2172 : size_t
2173 138 : SharedRecordTypmodRegistryEstimate(void)
2174 : {
2175 138 : return sizeof(SharedRecordTypmodRegistry);
2176 : }
2177 :
2178 : /*
2179 : * Initialize 'registry' in a pre-existing shared memory region, which must be
2180 : * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
2181 : * bytes.
2182 : *
2183 : * 'area' will be used to allocate shared memory space as required for the
2184 : * typemod registration. The current process, expected to be a leader process
2185 : * in a parallel query, will be attached automatically and its current record
2186 : * types will be loaded into *registry. While attached, all calls to
2187 : * assign_record_type_typmod will use the shared registry. Worker backends
2188 : * will need to attach explicitly.
2189 : *
2190 : * Note that this function takes 'area' and 'segment' as arguments rather than
2191 : * accessing them via CurrentSession, because they aren't installed there
2192 : * until after this function runs.
2193 : */
2194 : void
2195 138 : SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry,
2196 : dsm_segment *segment,
2197 : dsa_area *area)
2198 : {
2199 : MemoryContext old_context;
2200 : dshash_table *record_table;
2201 : dshash_table *typmod_table;
2202 : int32 typmod;
2203 :
2204 : Assert(!IsParallelWorker());
2205 :
2206 : /* We can't already be attached to a shared registry. */
2207 : Assert(CurrentSession->shared_typmod_registry == NULL);
2208 : Assert(CurrentSession->shared_record_table == NULL);
2209 : Assert(CurrentSession->shared_typmod_table == NULL);
2210 :
2211 138 : old_context = MemoryContextSwitchTo(TopMemoryContext);
2212 :
2213 : /* Create the hash table of tuple descriptors indexed by themselves. */
2214 138 : record_table = dshash_create(area, &srtr_record_table_params, area);
2215 :
2216 : /* Create the hash table of tuple descriptors indexed by typmod. */
2217 138 : typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);
2218 :
2219 138 : MemoryContextSwitchTo(old_context);
2220 :
2221 : /* Initialize the SharedRecordTypmodRegistry. */
2222 138 : registry->record_table_handle = dshash_get_hash_table_handle(record_table);
2223 138 : registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
2224 138 : pg_atomic_init_u32(&registry->next_typmod, NextRecordTypmod);
2225 :
2226 : /*
2227 : * Copy all entries from this backend's private registry into the shared
2228 : * registry.
2229 : */
2230 244 : for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
2231 : {
2232 : SharedTypmodTableEntry *typmod_table_entry;
2233 : SharedRecordTableEntry *record_table_entry;
2234 : SharedRecordTableKey record_table_key;
2235 : dsa_pointer shared_dp;
2236 : TupleDesc tupdesc;
2237 : bool found;
2238 :
2239 106 : tupdesc = RecordCacheArray[typmod].tupdesc;
2240 106 : if (tupdesc == NULL)
2241 0 : continue;
2242 :
2243 : /* Copy the TupleDesc into shared memory. */
2244 106 : shared_dp = share_tupledesc(area, tupdesc, typmod);
2245 :
2246 : /* Insert into the typmod table. */
2247 106 : typmod_table_entry = dshash_find_or_insert(typmod_table,
2248 106 : &tupdesc->tdtypmod,
2249 : &found);
2250 106 : if (found)
2251 0 : elog(ERROR, "cannot create duplicate shared record typmod");
2252 106 : typmod_table_entry->typmod = tupdesc->tdtypmod;
2253 106 : typmod_table_entry->shared_tupdesc = shared_dp;
2254 106 : dshash_release_lock(typmod_table, typmod_table_entry);
2255 :
2256 : /* Insert into the record table. */
2257 106 : record_table_key.shared = false;
2258 106 : record_table_key.u.local_tupdesc = tupdesc;
2259 106 : record_table_entry = dshash_find_or_insert(record_table,
2260 : &record_table_key,
2261 : &found);
2262 106 : if (!found)
2263 : {
2264 106 : record_table_entry->key.shared = true;
2265 106 : record_table_entry->key.u.shared_tupdesc = shared_dp;
2266 : }
2267 106 : dshash_release_lock(record_table, record_table_entry);
2268 : }
2269 :
2270 : /*
2271 : * Set up the global state that will tell assign_record_type_typmod and
2272 : * lookup_rowtype_tupdesc_internal about the shared registry.
2273 : */
2274 138 : CurrentSession->shared_record_table = record_table;
2275 138 : CurrentSession->shared_typmod_table = typmod_table;
2276 138 : CurrentSession->shared_typmod_registry = registry;
2277 :
2278 : /*
2279 : * We install a detach hook in the leader, but only to handle cleanup on
2280 : * failure during GetSessionDsmHandle(). Once GetSessionDsmHandle() pins
2281 : * the memory, the leader process will use a shared registry until it
2282 : * exits.
2283 : */
2284 138 : on_dsm_detach(segment, shared_record_typmod_registry_detach, (Datum) 0);
2285 138 : }
2286 :
2287 : /*
2288 : * Attach to 'registry', which must have been initialized already by another
2289 : * backend. Future calls to assign_record_type_typmod and
2290 : * lookup_rowtype_tupdesc_internal will use the shared registry until the
2291 : * current session is detached.
2292 : */
2293 : void
2294 2712 : SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
2295 : {
2296 : MemoryContext old_context;
2297 : dshash_table *record_table;
2298 : dshash_table *typmod_table;
2299 :
2300 : Assert(IsParallelWorker());
2301 :
2302 : /* We can't already be attached to a shared registry. */
2303 : Assert(CurrentSession != NULL);
2304 : Assert(CurrentSession->segment != NULL);
2305 : Assert(CurrentSession->area != NULL);
2306 : Assert(CurrentSession->shared_typmod_registry == NULL);
2307 : Assert(CurrentSession->shared_record_table == NULL);
2308 : Assert(CurrentSession->shared_typmod_table == NULL);
2309 :
2310 : /*
2311 : * We can't already have typmods in our local cache, because they'd clash
2312 : * with those imported by SharedRecordTypmodRegistryInit. This should be
2313 : * a freshly started parallel worker. If we ever support worker
2314 : * recycling, a worker would need to zap its local cache in between
2315 : * servicing different queries, in order to be able to call this and
2316 : * synchronize typmods with a new leader; but that's problematic because
2317 : * we can't be very sure that record-typmod-related state hasn't escaped
2318 : * to anywhere else in the process.
2319 : */
2320 : Assert(NextRecordTypmod == 0);
2321 :
2322 2712 : old_context = MemoryContextSwitchTo(TopMemoryContext);
2323 :
2324 : /* Attach to the two hash tables. */
2325 2712 : record_table = dshash_attach(CurrentSession->area,
2326 : &srtr_record_table_params,
2327 : registry->record_table_handle,
2328 2712 : CurrentSession->area);
2329 2712 : typmod_table = dshash_attach(CurrentSession->area,
2330 : &srtr_typmod_table_params,
2331 : registry->typmod_table_handle,
2332 : NULL);
2333 :
2334 2712 : MemoryContextSwitchTo(old_context);
2335 :
2336 : /*
2337 : * Set up detach hook to run at worker exit. Currently this is the same
2338 : * as the leader's detach hook, but in future they might need to be
2339 : * different.
2340 : */
2341 2712 : on_dsm_detach(CurrentSession->segment,
2342 : shared_record_typmod_registry_detach,
2343 : PointerGetDatum(registry));
2344 :
2345 : /*
2346 : * Set up the session state that will tell assign_record_type_typmod and
2347 : * lookup_rowtype_tupdesc_internal about the shared registry.
2348 : */
2349 2712 : CurrentSession->shared_typmod_registry = registry;
2350 2712 : CurrentSession->shared_record_table = record_table;
2351 2712 : CurrentSession->shared_typmod_table = typmod_table;
2352 2712 : }
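/*
 * Lifecycle sketch (simplified, hypothetical caller): the leader initializes
 * the registry while building the per-session DSM segment; each worker
 * attaches during startup, before running any queries:
 *
 *     -- leader, given maximally-aligned space 'ptr' of
 *     -- SharedRecordTypmodRegistryEstimate() bytes
 *     SharedRecordTypmodRegistryInit((SharedRecordTypmodRegistry *) ptr,
 *                                    segment, area);
 *
 *     -- worker startup
 *     SharedRecordTypmodRegistryAttach(registry);
 */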
2353 :
2354 : /*
2355 : * InvalidateCompositeTypeCacheEntry
2356 : * Invalidate particular TypeCacheEntry on Relcache inval callback
2357 : *
2358 : * Delete the cached tuple descriptor (if any) for the given composite
2359 : * type, and reset whatever info we have cached about the composite type's
2360 : * comparability.
2361 : */
2362 : static void
2363 8808 : InvalidateCompositeTypeCacheEntry(TypeCacheEntry *typentry)
2364 : {
2365 : bool hadTupDescOrOpclass;
2366 :
2367 : Assert(typentry->typtype == TYPTYPE_COMPOSITE &&
2368 : OidIsValid(typentry->typrelid));
2369 :
2370 14642 : hadTupDescOrOpclass = (typentry->tupDesc != NULL) ||
2371 5834 : (typentry->flags & TCFLAGS_OPERATOR_FLAGS);
2372 :
2373 : /* Delete tupdesc if we have it */
2374 8808 : if (typentry->tupDesc != NULL)
2375 : {
2376 : /*
2377 : * Release our refcount and free the tupdesc if none remain. We can't
2378 : * use DecrTupleDescRefCount here because this reference is not logged
2379 : * by the current resource owner.
2380 : */
2381 : Assert(typentry->tupDesc->tdrefcount > 0);
2382 2974 : if (--typentry->tupDesc->tdrefcount == 0)
2383 2422 : FreeTupleDesc(typentry->tupDesc);
2384 2974 : typentry->tupDesc = NULL;
2385 :
2386 : /*
2387 : * Also clear tupDesc_identifier, so that anyone watching it will
2388 : * realize that the tupdesc has changed.
2389 : */
2390 2974 : typentry->tupDesc_identifier = 0;
2391 : }
2392 :
2393 : /* Reset equality/comparison/hashing validity information */
2394 8808 : typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2395 :
2396 : /* Call delete_rel_type_cache() if we actually cleared something */
2397 8808 : if (hadTupDescOrOpclass)
2398 2974 : delete_rel_type_cache_if_needed(typentry);
2399 8808 : }
2400 :
2401 : /*
2402 : * TypeCacheRelCallback
2403 : * Relcache inval callback function
2404 : *
2405 : * Delete the cached tuple descriptor (if any) for the given rel's composite
2406 : * type, or for all composite types if relid == InvalidOid. Also reset
2407 : * whatever info we have cached about the composite type's comparability.
2408 : *
2409 : * This is called when a relcache invalidation event occurs for the given
2410 : * relid. We can't use syscache to find a type corresponding to the given
2411 : * relation because the code can be called outside of a transaction. Thus, we
2412 : * use the RelIdToTypeIdCacheHash map to locate appropriate typcache entry.
2413 : */
2414 : static void
2415 1735362 : TypeCacheRelCallback(Datum arg, Oid relid)
2416 : {
2417 : TypeCacheEntry *typentry;
2418 :
2419 : /*
2420 : * RelIdToTypeIdCacheHash and TypeCacheHash should exist, otherwise this
2421 : * callback wouldn't be registered
2422 : */
2423 1735362 : if (OidIsValid(relid))
2424 : {
2425 : RelIdToTypeIdCacheEntry *relentry;
2426 :
2427 : /*
2428 : * Find the RelIdToTypeIdCacheHash entry, which should exist as soon as
2429 : * the corresponding typcache entry has something to clean.
2430 : */
2431 1734762 : relentry = (RelIdToTypeIdCacheEntry *) hash_search(RelIdToTypeIdCacheHash,
2432 : &relid,
2433 : HASH_FIND, NULL);
2434 :
2435 1734762 : if (relentry != NULL)
2436 : {
2437 8668 : typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
2438 8668 : &relentry->composite_typid,
2439 : HASH_FIND, NULL);
2440 :
2441 8668 : if (typentry != NULL)
2442 : {
2443 : Assert(typentry->typtype == TYPTYPE_COMPOSITE);
2444 : Assert(relid == typentry->typrelid);
2445 :
2446 8668 : InvalidateCompositeTypeCacheEntry(typentry);
2447 : }
2448 : }
2449 :
2450 : /*
2451 : * Visit all the domain types sequentially. Typically this shouldn't
2452 : * affect performance, since the set of domain types tends to stay small:
2453 : * domain types are created manually, unlike composite types, which are
2454 : * created automatically for every temporary table.
2455 : */
2456 3035710 : for (typentry = firstDomainTypeEntry;
2457 : typentry != NULL;
2458 1300948 : typentry = typentry->nextDomain)
2459 : {
2460 : /*
2461 : * If it's domain over composite, reset flags. (We don't bother
2462 : * trying to determine whether the specific base type needs a
2463 : * reset.) Note that if we haven't determined whether the base
2464 : * type is composite, we don't need to reset anything.
2465 : */
2466 1300948 : if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
2467 0 : typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2468 : }
2469 : }
2470 : else
2471 : {
2472 : HASH_SEQ_STATUS status;
2473 :
2474 : /*
2475 : * Relid is invalid. By convention, this means we must reset all composite
2476 : * types in the cache. We must also reset flags for domain types; since
2477 : * we scan every hash entry anyway, do both in a single pass.
2478 : */
2479 600 : hash_seq_init(&status, TypeCacheHash);
2480 2746 : while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2481 : {
2482 2146 : if (typentry->typtype == TYPTYPE_COMPOSITE)
2483 : {
2484 140 : InvalidateCompositeTypeCacheEntry(typentry);
2485 : }
2486 2006 : else if (typentry->typtype == TYPTYPE_DOMAIN)
2487 : {
2488 : /*
2489 : * If it's domain over composite, reset flags. (We don't
2490 : * bother trying to determine whether the specific base type
2491 : * needs a reset.) Note that if we haven't determined whether
2492 : * the base type is composite, we don't need to reset
2493 : * anything.
2494 : */
2495 12 : if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
2496 0 : typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2497 : }
2498 : }
2499 : }
2500 1735362 : }
2501 :
2502 : /*
2503 : * TypeCacheTypCallback
2504 : * Syscache inval callback function
2505 : *
2506 : * This is called when a syscache invalidation event occurs for any
2507 : * pg_type row. If we have information cached about that type, mark
2508 : * it as needing to be reloaded.
2509 : */
2510 : static void
2511 585676 : TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
2512 : {
2513 : HASH_SEQ_STATUS status;
2514 : TypeCacheEntry *typentry;
2515 :
2516 : /* TypeCacheHash must exist, else this callback wouldn't be registered */
2517 :
2518 : /*
2519 : * By convention, zero hash value is passed to the callback as a sign that
2520 : * it's time to invalidate the whole cache. See sinval.c, inval.c and
2521 : * InvalidateSystemCachesExtended().
2522 : */
2523 585676 : if (hashvalue == 0)
2524 484 : hash_seq_init(&status, TypeCacheHash);
2525 : else
2526 585192 : hash_seq_init_with_hash_value(&status, TypeCacheHash, hashvalue);
2527 :
2528 592136 : while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2529 : {
2530 6460 : bool hadPgTypeData = (typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA);
2531 :
2532 : Assert(hashvalue == 0 || typentry->type_id_hash == hashvalue);
2533 :
2534 : /*
2535 : * Mark the data obtained directly from pg_type as invalid. Also, if
2536 : * it's a domain, typnotnull might've changed, so we'll need to
2537 : * recalculate its constraints.
2538 : */
2539 6460 : typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
2540 : TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS);
2541 :
2542 : /*
2543 : * Call delete_rel_type_cache() if we cleaned
2544 : * TCFLAGS_HAVE_PG_TYPE_DATA flag previously.
2545 : */
2546 6460 : if (hadPgTypeData)
2547 3398 : delete_rel_type_cache_if_needed(typentry);
2548 : }
2549 585676 : }
2550 :
2551 : /*
2552 : * TypeCacheOpcCallback
2553 : * Syscache inval callback function
2554 : *
2555 : * This is called when a syscache invalidation event occurs for any pg_opclass
2556 : * row. In principle we could probably just invalidate data dependent on the
2557 : * particular opclass, but since updates on pg_opclass are rare in production
2558 : * it doesn't seem worth a lot of complication: we just mark all cached data
2559 : * invalid.
2560 : *
2561 : * Note that we don't bother watching for updates on pg_amop or pg_amproc.
2562 : * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
2563 : * is not allowed to be used to add/drop the primary operators and functions
2564 : * of an opclass, only cross-type members of a family; and the latter sorts
2565 : * of members are not going to get cached here.
2566 : */
2567 : static void
2568 1380 : TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
2569 : {
2570 : HASH_SEQ_STATUS status;
2571 : TypeCacheEntry *typentry;
2572 :
2573 : /* TypeCacheHash must exist, else this callback wouldn't be registered */
2574 1380 : hash_seq_init(&status, TypeCacheHash);
2575 8466 : while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
2576 : {
2577 : /* Reset equality/comparison/hashing validity information */
2578 7086 : typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
2579 : }
2580 1380 : }
2581 :
2582 : /*
2583 : * TypeCacheConstrCallback
2584 : * Syscache inval callback function
2585 : *
2586 : * This is called when a syscache invalidation event occurs for any
2587 : * pg_constraint row. We flush information about domain constraints
2588 : * when this happens.
2589 : *
2590 : * It's slightly annoying that we can't tell whether the inval event was for
2591 : * a domain constraint record or not; there's usually more update traffic
2592 : * for table constraints than domain constraints, so we'll do a lot of
2593 : * useless flushes. Still, this is better than the old no-caching-at-all
2594 : * approach to domain constraints.
2595 : */
2596 : static void
2597 161438 : TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
2598 : {
2599 : TypeCacheEntry *typentry;
2600 :
2601 : /*
2602 : * Because this is called very frequently, and typically very few of the
2603 : * typcache entries are for domains, we don't use hash_seq_search here.
2604 : * Instead we thread all the domain-type entries together so that we can
2605 : * visit them cheaply.
2606 : */
2607 308118 : for (typentry = firstDomainTypeEntry;
2608 : typentry != NULL;
2609 146680 : typentry = typentry->nextDomain)
2610 : {
2611 : /* Reset domain constraint validity information */
2612 146680 : typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
2613 : }
2614 161438 : }
2615 :
2616 :
2617 : /*
2618 : * Check if the given OID is part of the subset that's sortable by OID comparison
2619 : */
2620 : static inline bool
2621 300114 : enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
2622 : {
2623 : Oid offset;
2624 :
2625 300114 : if (arg < enumdata->bitmap_base)
2626 0 : return false;
2627 300114 : offset = arg - enumdata->bitmap_base;
2628 300114 : if (offset > (Oid) INT_MAX)
2629 0 : return false;
2630 300114 : return bms_is_member((int) offset, enumdata->sorted_values);
2631 : }
2632 :
2633 :
2634 : /*
2635 : * compare_values_of_enum
2636 : * Compare two members of an enum type.
2637 : * Return <0, 0, or >0 according as arg1 <, =, or > arg2.
2638 : *
2639 : * Note: currently, the enumData cache is refreshed only if we are asked
2640 : * to compare an enum value that is not already in the cache. This is okay
2641 : * because there is no support for re-ordering existing values, so comparisons
2642 : * of previously cached values will return the right answer even if other
2643 : * values have been added since we last loaded the cache.
2644 : *
2645 : * Note: the enum logic has a special-case rule about even-numbered versus
2646 : * odd-numbered OIDs, but we take no account of that rule here; this
2647 : * routine shouldn't even get called when that rule applies.
2648 : */
2649 : int
2650 150074 : compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
2651 : {
2652 : TypeCacheEnumData *enumdata;
2653 : EnumItem *item1;
2654 : EnumItem *item2;
2655 :
2656 : /*
2657 : * Equal OIDs are certainly equal --- this case was probably handled by
2658 : * our caller, but we may as well check.
2659 : */
2660 150074 : if (arg1 == arg2)
2661 0 : return 0;
2662 :
2663 : /* Load up the cache if first time through */
2664 150074 : if (tcache->enumData == NULL)
2665 8 : load_enum_cache_data(tcache);
2666 150074 : enumdata = tcache->enumData;
2667 :
2668 : /*
2669 : * If both OIDs are known-sorted, we can just compare them directly.
2670 : */
2671 300114 : if (enum_known_sorted(enumdata, arg1) &&
2672 150040 : enum_known_sorted(enumdata, arg2))
2673 : {
2674 0 : if (arg1 < arg2)
2675 0 : return -1;
2676 : else
2677 0 : return 1;
2678 : }
2679 :
2680 : /*
2681 : * Slow path: we have to identify their actual sort-order positions.
2682 : */
2683 150074 : item1 = find_enumitem(enumdata, arg1);
2684 150074 : item2 = find_enumitem(enumdata, arg2);
2685 :
2686 150074 : if (item1 == NULL || item2 == NULL)
2687 : {
2688 : /*
2689 : * We couldn't find one or both values. That means the enum has
2690 : * changed under us, so re-initialize the cache and try again. We
2691 : * don't bother retrying the known-sorted case in this path.
2692 : */
2693 0 : load_enum_cache_data(tcache);
2694 0 : enumdata = tcache->enumData;
2695 :
2696 0 : item1 = find_enumitem(enumdata, arg1);
2697 0 : item2 = find_enumitem(enumdata, arg2);
2698 :
2699 : /*
2700 : * If we still can't find the values, complain: we must have corrupt
2701 : * data.
2702 : */
2703 0 : if (item1 == NULL)
2704 0 : elog(ERROR, "enum value %u not found in cache for enum %s",
2705 : arg1, format_type_be(tcache->type_id));
2706 0 : if (item2 == NULL)
2707 0 : elog(ERROR, "enum value %u not found in cache for enum %s",
2708 : arg2, format_type_be(tcache->type_id));
2709 : }
2710 :
2711 150074 : if (item1->sort_order < item2->sort_order)
2712 50024 : return -1;
2713 100050 : else if (item1->sort_order > item2->sort_order)
2714 100050 : return 1;
2715 : else
2716 0 : return 0;
2717 : }
2718 :
2719 : /*
2720 : * Load (or re-load) the enumData member of the typcache entry.
2721 : */
2722 : static void
2723 8 : load_enum_cache_data(TypeCacheEntry *tcache)
2724 : {
2725 : TypeCacheEnumData *enumdata;
2726 : Relation enum_rel;
2727 : SysScanDesc enum_scan;
2728 : HeapTuple enum_tuple;
2729 : ScanKeyData skey;
2730 : EnumItem *items;
2731 : int numitems;
2732 : int maxitems;
2733 : Oid bitmap_base;
2734 : Bitmapset *bitmap;
2735 : MemoryContext oldcxt;
2736 : int bm_size,
2737 : start_pos;
2738 :
2739 : /* Check that this is actually an enum */
2740 8 : if (tcache->typtype != TYPTYPE_ENUM)
2741 0 : ereport(ERROR,
2742 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
2743 : errmsg("%s is not an enum",
2744 : format_type_be(tcache->type_id))));
2745 :
2746 : /*
2747 : * Read all the information for members of the enum type. We collect the
2748 : * info in working memory in the caller's context, and then transfer it to
2749 : * permanent memory in CacheMemoryContext. This minimizes the risk of
2750 : * leaking memory from CacheMemoryContext in the event of an error partway
2751 : * through.
2752 : */
2753 8 : maxitems = 64;
2754 8 : items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
2755 8 : numitems = 0;
2756 :
2757 : /* Scan pg_enum for the members of the target enum type. */
2758 8 : ScanKeyInit(&skey,
2759 : Anum_pg_enum_enumtypid,
2760 : BTEqualStrategyNumber, F_OIDEQ,
2761 : ObjectIdGetDatum(tcache->type_id));
2762 :
2763 8 : enum_rel = table_open(EnumRelationId, AccessShareLock);
2764 8 : enum_scan = systable_beginscan(enum_rel,
2765 : EnumTypIdLabelIndexId,
2766 : true, NULL,
2767 : 1, &skey);
2768 :
2769 64 : while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
2770 : {
2771 56 : Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);
2772 :
2773 56 : if (numitems >= maxitems)
2774 : {
2775 0 : maxitems *= 2;
2776 0 : items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
2777 : }
2778 56 : items[numitems].enum_oid = en->oid;
2779 56 : items[numitems].sort_order = en->enumsortorder;
2780 56 : numitems++;
2781 : }
2782 :
2783 8 : systable_endscan(enum_scan);
2784 8 : table_close(enum_rel, AccessShareLock);
2785 :
2786 : /* Sort the items into OID order */
2787 8 : qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
2788 :
2789 : /*
2790 : * Here, we create a bitmap listing a subset of the enum's OIDs that are
2791 : * known to be in order and can thus be compared with just OID comparison.
2792 : *
2793 : * The point of this is that the enum's initial OIDs were certainly in
2794 : * order, so there is some subset that can be compared via OID comparison;
2795 : * and we'd rather not do binary searches unnecessarily.
2796 : *
2797 : * This is somewhat heuristic, and might identify a subset of OIDs that
2798 : * isn't exactly what the type started with. That's okay as long as the
2799 : * subset is correctly sorted.
2800 : */
2801 8 : bitmap_base = InvalidOid;
2802 8 : bitmap = NULL;
2803 8 : bm_size = 1; /* only save sets of at least 2 OIDs */
2804 :
2805 20 : for (start_pos = 0; start_pos < numitems - 1; start_pos++)
2806 : {
2807 : /*
2808 : * Identify longest sorted subsequence starting at start_pos
2809 : */
2810 20 : Bitmapset *this_bitmap = bms_make_singleton(0);
2811 20 : int this_bm_size = 1;
2812 20 : Oid start_oid = items[start_pos].enum_oid;
2813 20 : float4 prev_order = items[start_pos].sort_order;
2814 : int i;
2815 :
2816 134 : for (i = start_pos + 1; i < numitems; i++)
2817 : {
2818 : Oid offset;
2819 :
2820 114 : offset = items[i].enum_oid - start_oid;
2821 : /* quit if bitmap would be too large; cutoff is arbitrary */
2822 114 : if (offset >= 8192)
2823 0 : break;
2824 : /* include the item if it's in-order */
2825 114 : if (items[i].sort_order > prev_order)
2826 : {
2827 58 : prev_order = items[i].sort_order;
2828 58 : this_bitmap = bms_add_member(this_bitmap, (int) offset);
2829 58 : this_bm_size++;
2830 : }
2831 : }
2832 :
2833 : /* Remember it if larger than previous best */
2834 20 : if (this_bm_size > bm_size)
2835 : {
2836 8 : bms_free(bitmap);
2837 8 : bitmap_base = start_oid;
2838 8 : bitmap = this_bitmap;
2839 8 : bm_size = this_bm_size;
2840 : }
2841 : else
2842 12 : bms_free(this_bitmap);
2843 :
2844 : /*
2845 : * Done if it's not possible to find a longer sequence in the rest of
2846 : * the list. In typical cases this will happen on the first
2847 : * iteration, which is why we create the bitmaps on the fly instead of
2848 : * doing a second pass over the list.
2849 : */
2850 20 : if (bm_size >= (numitems - start_pos - 1))
2851 8 : break;
2852 : }
2853 :
2854 : /* OK, copy the data into CacheMemoryContext */
2855 8 : oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
2856 : enumdata = (TypeCacheEnumData *)
2857 8 : palloc(offsetof(TypeCacheEnumData, enum_values) +
2858 8 : numitems * sizeof(EnumItem));
2859 8 : enumdata->bitmap_base = bitmap_base;
2860 8 : enumdata->sorted_values = bms_copy(bitmap);
2861 8 : enumdata->num_values = numitems;
2862 8 : memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
2863 8 : MemoryContextSwitchTo(oldcxt);
2864 :
2865 8 : pfree(items);
2866 8 : bms_free(bitmap);
2867 :
2868 : /* And link the finished cache struct into the typcache */
2869 8 : if (tcache->enumData != NULL)
2870 0 : pfree(tcache->enumData);
2871 8 : tcache->enumData = enumdata;
2872 8 : }
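/*
 * Worked example (illustrative): suppose five members, in OID order, carry
 * sort_order values 1, 2, 4, 3, 5. Starting at position 0, the in-order
 * chain found above is {1, 2, 4, 5}, so the bitmap covers those four OIDs,
 * and the loop stops after the first iteration since no longer chain is
 * possible. The remaining member (sort_order 3) always goes through
 * find_enumitem() in compare_values_of_enum().
 */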
2873 :
2874 : /*
2875 : * Locate the EnumItem with the given OID, if present
2876 : */
2877 : static EnumItem *
2878 300148 : find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
2879 : {
2880 : EnumItem srch;
2881 :
2882 : /* On some versions of Solaris, bsearch of zero items dumps core */
2883 300148 : if (enumdata->num_values <= 0)
2884 0 : return NULL;
2885 :
2886 300148 : srch.enum_oid = arg;
2887 300148 : return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
2888 : sizeof(EnumItem), enum_oid_cmp);
2889 : }
2890 :
2891 : /*
2892 : * qsort comparison function for OID-ordered EnumItems
2893 : */
2894 : static int
2895 600518 : enum_oid_cmp(const void *left, const void *right)
2896 : {
2897 600518 : const EnumItem *l = (const EnumItem *) left;
2898 600518 : const EnumItem *r = (const EnumItem *) right;
2899 :
2900 600518 : return pg_cmp_u32(l->enum_oid, r->enum_oid);
2901 : }
2902 :
2903 : /*
2904 : * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
2905 : * to the given value and return a dsa_pointer.
2906 : */
2907 : static dsa_pointer
2908 168 : share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
2909 : {
2910 : dsa_pointer shared_dp;
2911 : TupleDesc shared;
2912 :
2913 168 : shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
2914 168 : shared = (TupleDesc) dsa_get_address(area, shared_dp);
2915 168 : TupleDescCopy(shared, tupdesc);
2916 168 : shared->tdtypmod = typmod;
2917 :
2918 168 : return shared_dp;
2919 : }
2920 :
2921 : /*
2922 : * If we are attached to a SharedRecordTypmodRegistry, use it to find or
2923 : * create a shared TupleDesc that matches 'tupdesc'. Otherwise return NULL.
2924 : * Tuple descriptors returned by this function are not reference counted, and
2925 : * will exist at least as long as the current backend remains attached to the
2926 : * current session.
2927 : */
2928 : static TupleDesc
2929 14828 : find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
2930 : {
2931 : TupleDesc result;
2932 : SharedRecordTableKey key;
2933 : SharedRecordTableEntry *record_table_entry;
2934 : SharedTypmodTableEntry *typmod_table_entry;
2935 : dsa_pointer shared_dp;
2936 : bool found;
2937 : uint32 typmod;
2938 :
2939 : /* If not even attached, nothing to do. */
2940 14828 : if (CurrentSession->shared_typmod_registry == NULL)
2941 14758 : return NULL;
2942 :
2943 : /* Try to find a matching tuple descriptor in the record table. */
2944 70 : key.shared = false;
2945 70 : key.u.local_tupdesc = tupdesc;
2946 : record_table_entry = (SharedRecordTableEntry *)
2947 70 : dshash_find(CurrentSession->shared_record_table, &key, false);
2948 70 : if (record_table_entry)
2949 : {
2950 : Assert(record_table_entry->key.shared);
2951 8 : dshash_release_lock(CurrentSession->shared_record_table,
2952 : record_table_entry);
2953 : result = (TupleDesc)
2954 8 : dsa_get_address(CurrentSession->area,
2955 : record_table_entry->key.u.shared_tupdesc);
2956 : Assert(result->tdrefcount == -1);
2957 :
2958 8 : return result;
2959 : }
2960 :
2961 : /* Allocate a new typmod number. This will be wasted if we error out. */
2962 62 : typmod = (int)
2963 62 : pg_atomic_fetch_add_u32(&CurrentSession->shared_typmod_registry->next_typmod,
2964 : 1);
2965 :
2966 : /* Copy the TupleDesc into shared memory. */
2967 62 : shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
2968 :
2969 : /*
2970 : * Create an entry in the typmod table so that others will understand this
2971 : * typmod number.
2972 : */
2973 62 : PG_TRY();
2974 : {
2975 : typmod_table_entry = (SharedTypmodTableEntry *)
2976 62 : dshash_find_or_insert(CurrentSession->shared_typmod_table,
2977 : &typmod, &found);
2978 62 : if (found)
2979 0 : elog(ERROR, "cannot create duplicate shared record typmod");
2980 : }
2981 0 : PG_CATCH();
2982 : {
2983 0 : dsa_free(CurrentSession->area, shared_dp);
2984 0 : PG_RE_THROW();
2985 : }
2986 62 : PG_END_TRY();
2987 62 : typmod_table_entry->typmod = typmod;
2988 62 : typmod_table_entry->shared_tupdesc = shared_dp;
2989 62 : dshash_release_lock(CurrentSession->shared_typmod_table,
2990 : typmod_table_entry);
2991 :
2992 : /*
2993 : * Finally create an entry in the record table so others with matching
2994 : * tuple descriptors can reuse the typmod.
2995 : */
2996 : record_table_entry = (SharedRecordTableEntry *)
2997 62 : dshash_find_or_insert(CurrentSession->shared_record_table, &key,
2998 : &found);
2999 62 : if (found)
3000 : {
3001 : /*
3002 : * Someone concurrently inserted a matching tuple descriptor since the
3003 : * first time we checked. Use that one instead.
3004 : */
3005 0 : dshash_release_lock(CurrentSession->shared_record_table,
3006 : record_table_entry);
3007 :
3008 : /* Might as well free up the space used by the one we created. */
3009 0 : found = dshash_delete_key(CurrentSession->shared_typmod_table,
3010 : &typmod);
3011 : Assert(found);
3012 0 : dsa_free(CurrentSession->area, shared_dp);
3013 :
3014 : /* Return the one we found. */
3015 : Assert(record_table_entry->key.shared);
3016 : result = (TupleDesc)
3017 0 : dsa_get_address(CurrentSession->area,
3018 : record_table_entry->key.u.shared_tupdesc);
3019 : Assert(result->tdrefcount == -1);
3020 :
3021 0 : return result;
3022 : }
3023 :
3024 : /* Store it and return it. */
3025 62 : record_table_entry->key.shared = true;
3026 62 : record_table_entry->key.u.shared_tupdesc = shared_dp;
3027 62 : dshash_release_lock(CurrentSession->shared_record_table,
3028 : record_table_entry);
3029 : result = (TupleDesc)
3030 62 : dsa_get_address(CurrentSession->area, shared_dp);
3031 : Assert(result->tdrefcount == -1);
3032 :
3033 62 : return result;
3034 : }
3035 :
3036 : /*
3037 : * On-DSM-detach hook to forget about the current shared record typmod
3038 : * infrastructure. This is currently used by both leader and workers.
3039 : */
3040 : static void
3041 2850 : shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
3042 : {
3043 : /* Be cautious here: maybe we didn't finish initializing. */
3044 2850 : if (CurrentSession->shared_record_table != NULL)
3045 : {
3046 2850 : dshash_detach(CurrentSession->shared_record_table);
3047 2850 : CurrentSession->shared_record_table = NULL;
3048 : }
3049 2850 : if (CurrentSession->shared_typmod_table != NULL)
3050 : {
3051 2850 : dshash_detach(CurrentSession->shared_typmod_table);
3052 2850 : CurrentSession->shared_typmod_table = NULL;
3053 : }
3054 2850 : CurrentSession->shared_typmod_registry = NULL;
3055 2850 : }
3056 :
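 : /*
 :  * For illustration: this callback is expected to be registered against
 :  * the registry's DSM segment, e.g.
 :  *
 :  *     on_dsm_detach(segment, shared_record_typmod_registry_detach,
 :  *                   (Datum) 0);
 :  *
 :  * so that the session-level pointers are reset before the mapping
 :  * disappears.
 :  */
 :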
3057 : /*
3058 : * Insert RelIdToTypeIdCacheHash entry if needed.
3059 : */
3060 : static void
3061 663380 : insert_rel_type_cache_if_needed(TypeCacheEntry *typentry)
3062 : {
3063 : /* Immediately quit for non-composite types */
3064 663380 : if (typentry->typtype != TYPTYPE_COMPOSITE)
3065 581054 : return;
3066 :
3067 : /* typrelid should be given for composite types */
3068 : Assert(OidIsValid(typentry->typrelid));
3069 :
3070 : /*
3071 : * Insert a RelIdToTypeIdCacheHash entry if the typentry has any
3072 : * information indicating it should be there.
3073 : */
3074 82326 : if ((typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) ||
3075 0 : (typentry->flags & TCFLAGS_OPERATOR_FLAGS) ||
3076 0 : typentry->tupDesc != NULL)
3077 : {
3078 : RelIdToTypeIdCacheEntry *relentry;
3079 : bool found;
3080 :
3081 82326 : relentry = (RelIdToTypeIdCacheEntry *) hash_search(RelIdToTypeIdCacheHash,
3082 82326 : &typentry->typrelid,
3083 : HASH_ENTER, &found);
3084 82326 : relentry->relid = typentry->typrelid;
3085 82326 : relentry->composite_typid = typentry->type_id;
3086 : }
3087 : }
3088 :
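 : /*
 :  * For illustration: with this mapping in place, code holding only a
 :  * relation OID can reach the composite type's cache entry without a
 :  * catalog lookup, roughly like this (hypothetical caller):
 :  *
 :  *     RelIdToTypeIdCacheEntry *relentry;
 :  *
 :  *     relentry = (RelIdToTypeIdCacheEntry *)
 :  *         hash_search(RelIdToTypeIdCacheHash, &relid, HASH_FIND, NULL);
 :  *     if (relentry != NULL)
 :  *         typentry = (TypeCacheEntry *)
 :  *             hash_search(TypeCacheHash, &relentry->composite_typid,
 :  *                         HASH_FIND, NULL);
 :  */
 :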
3089 : /*
3090 : * Delete the RelIdToTypeIdCacheHash entry if needed after resetting the
3091 : * TCFLAGS_HAVE_PG_TYPE_DATA flag, any of the TCFLAGS_OPERATOR_FLAGS bits,
3092 : * or tupDesc.
3093 : */
3094 : static void
3095 6372 : delete_rel_type_cache_if_needed(TypeCacheEntry *typentry)
3096 : {
3097 : #ifdef USE_ASSERT_CHECKING
3098 : int i;
3099 : bool is_in_progress = false;
3100 :
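 : /*
 :  * Check whether lookup_type_cache() currently has this type marked
 :  * as in-progress; if so, the RelIdToTypeIdCacheHash entry may
 :  * legitimately be missing, and the assertions below allow for that.
 :  */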
3101 : for (i = 0; i < in_progress_list_len; i++)
3102 : {
3103 : if (in_progress_list[i] == typentry->type_id)
3104 : {
3105 : is_in_progress = true;
3106 : break;
3107 : }
3108 : }
3109 : #endif
3110 :
3111 : /* Immediately quit for non-composite types */
3112 6372 : if (typentry->typtype != TYPTYPE_COMPOSITE)
3113 1384 : return;
3114 :
3115 : /* typrelid should be given for composite types */
3116 : Assert(OidIsValid(typentry->typrelid));
3117 :
3118 : /*
3119 : * Delete the RelIdToTypeIdCacheHash entry if the typentry no longer has
3120 : * any information indicating the entry should still be there.
3121 : */
3122 4988 : if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA) &&
3123 2722 : !(typentry->flags & TCFLAGS_OPERATOR_FLAGS) &&
3124 2638 : typentry->tupDesc == NULL)
3125 : {
3126 : bool found;
3127 :
3128 2008 : (void) hash_search(RelIdToTypeIdCacheHash,
3129 2008 : &typentry->typrelid,
3130 : HASH_REMOVE, &found);
3131 : Assert(found || is_in_progress);
3132 : }
3133 : else
3134 : {
3135 : #ifdef USE_ASSERT_CHECKING
3136 : /*
3137 : * Otherwise, in assert-enabled builds check that the
3138 : * RelIdToTypeIdCacheHash entry exists when it should.
3139 : */
3140 : bool found;
3141 :
3142 : if (!is_in_progress)
3143 : {
3144 : (void) hash_search(RelIdToTypeIdCacheHash,
3145 : &typentry->typrelid,
3146 : HASH_FIND, &found);
3147 : Assert(found);
3148 : }
3149 : #endif
3150 : }
3151 : }
3152 :
3153 : /*
3154 : * Add any missing RelIdToTypeIdCacheHash entries for TypeCacheHash
3155 : * entries left marked as in-progress by lookup_type_cache(), as can
3156 : * happen if that call hit an error or was interrupted.
3157 : */
3158 : static void
3159 769248 : finalize_in_progress_typentries(void)
3160 : {
3161 : int i;
3162 :
3163 769250 : for (i = 0; i < in_progress_list_len; i++)
3164 : {
3165 : TypeCacheEntry *typentry;
3166 :
3167 2 : typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
3168 2 : &in_progress_list[i],
3169 : HASH_FIND, NULL);
3170 2 : if (typentry)
3171 2 : insert_rel_type_cache_if_needed(typentry);
3172 : }
3173 :
3174 769248 : in_progress_list_len = 0;
3175 769248 : }
3176 :
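 : /*
 :  * End-of-transaction cleanup: finish any typcache entries that a
 :  * failed or interrupted lookup_type_cache() left marked in-progress.
 :  */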
3177 : void
3178 749204 : AtEOXact_TypeCache(void)
3179 : {
3180 749204 : finalize_in_progress_typentries();
3181 749204 : }
3182 :
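 : /*
 :  * End-of-subtransaction cleanup: same as above, since a
 :  * subtransaction abort can likewise leave in-progress entries behind.
 :  */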
3183 : void
3184 20044 : AtEOSubXact_TypeCache(void)
3185 : {
3186 20044 : finalize_in_progress_typentries();
3187 20044 : }
|