LCOV - code coverage report
Current view: top level - src/backend/utils/cache - typcache.c (source / functions)
Test:     PostgreSQL 15devel
Date:     2021-12-09 03:08:47
Coverage: Lines: 791 of 902 (87.7 %)    Functions: 53 of 55 (96.4 %)
Legend:   Lines: hit / not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * typcache.c
       4             :  *    POSTGRES type cache code
       5             :  *
       6             :  * The type cache exists to speed lookup of certain information about data
       7             :  * types that is not directly available from a type's pg_type row.  For
       8             :  * example, we use a type's default btree opclass, or the default hash
       9             :  * opclass if no btree opclass exists, to determine which operators should
      10             :  * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
      11             :  *
      12             :  * Several seemingly-odd choices have been made to support use of the type
      13             :  * cache by generic array and record handling routines, such as array_eq(),
      14             :  * record_cmp(), and hash_array().  Because those routines are used as index
      15             :  * support operations, they cannot leak memory.  To allow them to execute
      16             :  * efficiently, all information that they would like to re-use across calls
      17             :  * is kept in the type cache.
      18             :  *
      19             :  * Once created, a type cache entry lives as long as the backend does, so
      20             :  * there is no need for a call to release a cache entry.  If the type is
      21             :  * dropped, the cache entry simply becomes wasted storage.  This is not
      22             :  * expected to happen often, and assuming that typcache entries are good
      23             :  * permanently allows caching pointers to them in long-lived places.
      24             :  *
      25             :  * We have some provisions for updating cache entries if the stored data
      26             :  * becomes obsolete.  Core data extracted from the pg_type row is updated
      27             :  * when we detect updates to pg_type.  Information dependent on opclasses is
      28             :  * cleared if we detect updates to pg_opclass.  We also support clearing the
      29             :  * tuple descriptor and operator/function parts of a rowtype's cache entry,
      30             :  * since those may need to change as a consequence of ALTER TABLE.  Domain
      31             :  * constraint changes are also tracked properly.
      32             :  *
      33             :  *
      34             :  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
      35             :  * Portions Copyright (c) 1994, Regents of the University of California
      36             :  *
      37             :  * IDENTIFICATION
      38             :  *    src/backend/utils/cache/typcache.c
      39             :  *
      40             :  *-------------------------------------------------------------------------
      41             :  */
      42             : #include "postgres.h"
      43             : 
      44             : #include <limits.h>
      45             : 
      46             : #include "access/hash.h"
      47             : #include "access/htup_details.h"
      48             : #include "access/nbtree.h"
      49             : #include "access/parallel.h"
      50             : #include "access/relation.h"
      51             : #include "access/session.h"
      52             : #include "access/table.h"
      53             : #include "catalog/pg_am.h"
      54             : #include "catalog/pg_constraint.h"
      55             : #include "catalog/pg_enum.h"
      56             : #include "catalog/pg_operator.h"
      57             : #include "catalog/pg_range.h"
      58             : #include "catalog/pg_type.h"
      59             : #include "commands/defrem.h"
      60             : #include "executor/executor.h"
      61             : #include "lib/dshash.h"
      62             : #include "optimizer/optimizer.h"
      63             : #include "port/pg_bitutils.h"
      64             : #include "storage/lwlock.h"
      65             : #include "utils/builtins.h"
      66             : #include "utils/catcache.h"
      67             : #include "utils/fmgroids.h"
      68             : #include "utils/inval.h"
      69             : #include "utils/lsyscache.h"
      70             : #include "utils/memutils.h"
      71             : #include "utils/rel.h"
      72             : #include "utils/snapmgr.h"
      73             : #include "utils/syscache.h"
      74             : #include "utils/typcache.h"
      75             : 
      76             : 
      77             : /* The main type cache hashtable searched by lookup_type_cache */
      78             : static HTAB *TypeCacheHash = NULL;
      79             : 
      80             : /* List of type cache entries for domain types */
      81             : static TypeCacheEntry *firstDomainTypeEntry = NULL;
      82             : 
      83             : /* Private flag bits in the TypeCacheEntry.flags field */
      84             : #define TCFLAGS_HAVE_PG_TYPE_DATA           0x000001
      85             : #define TCFLAGS_CHECKED_BTREE_OPCLASS       0x000002
      86             : #define TCFLAGS_CHECKED_HASH_OPCLASS        0x000004
      87             : #define TCFLAGS_CHECKED_EQ_OPR              0x000008
      88             : #define TCFLAGS_CHECKED_LT_OPR              0x000010
      89             : #define TCFLAGS_CHECKED_GT_OPR              0x000020
      90             : #define TCFLAGS_CHECKED_CMP_PROC            0x000040
      91             : #define TCFLAGS_CHECKED_HASH_PROC           0x000080
      92             : #define TCFLAGS_CHECKED_HASH_EXTENDED_PROC  0x000100
      93             : #define TCFLAGS_CHECKED_ELEM_PROPERTIES     0x000200
      94             : #define TCFLAGS_HAVE_ELEM_EQUALITY          0x000400
      95             : #define TCFLAGS_HAVE_ELEM_COMPARE           0x000800
      96             : #define TCFLAGS_HAVE_ELEM_HASHING           0x001000
      97             : #define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING  0x002000
      98             : #define TCFLAGS_CHECKED_FIELD_PROPERTIES    0x004000
      99             : #define TCFLAGS_HAVE_FIELD_EQUALITY         0x008000
     100             : #define TCFLAGS_HAVE_FIELD_COMPARE          0x010000
     101             : #define TCFLAGS_HAVE_FIELD_HASHING          0x020000
     102             : #define TCFLAGS_HAVE_FIELD_EXTENDED_HASHING 0x040000
     103             : #define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS  0x080000
     104             : #define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE    0x100000
     105             : 
     106             : /* The flags associated with equality/comparison/hashing are all but these: */
     107             : #define TCFLAGS_OPERATOR_FLAGS \
     108             :     (~(TCFLAGS_HAVE_PG_TYPE_DATA | \
     109             :        TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
     110             :        TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))
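
When an opclass or constraint changes, the flag bits above let the invalidation callbacks discard cached operator and function lookups wholesale while keeping the basic pg_type data and domain-constraint state. A minimal sketch of that pattern, assuming a backend context; the real callback bodies (TypeCacheOpcCallback and friends, declared further down) fall outside this excerpt, and the helper name here is purely illustrative:

static void
reset_operator_flags_sketch(void)
{
    HASH_SEQ_STATUS status;
    TypeCacheEntry *typentry;

    /* Walk every typcache entry and forget operator/hashing lookups only. */
    hash_seq_init(&status, TypeCacheHash);
    while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
        typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
}
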
     111             : 
     112             : /*
     113             :  * Data stored about a domain type's constraints.  Note that we do not create
     114             :  * this struct for the common case of a constraint-less domain; we just set
     115             :  * domainData to NULL to indicate that.
     116             :  *
     117             :  * Within a DomainConstraintCache, we store expression plan trees, but the
     118             :  * check_exprstate fields of the DomainConstraintState nodes are just NULL.
     119             :  * When needed, expression evaluation nodes are built by flat-copying the
     120             :  * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
     121             :  * Such a node tree is not part of the DomainConstraintCache, but is
     122             :  * considered to belong to a DomainConstraintRef.
     123             :  */
     124             : struct DomainConstraintCache
     125             : {
     126             :     List       *constraints;    /* list of DomainConstraintState nodes */
     127             :     MemoryContext dccContext;   /* memory context holding all associated data */
     128             :     long        dccRefCount;    /* number of references to this struct */
     129             : };
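
The flat-copy-and-ExecInitExpr pattern described in the comment above looks roughly like the sketch below (compare prep_domain_constraints() later in this file; the function name here is illustrative, and memory-context management is omitted):

static DomainConstraintState *
copy_constraint_for_execution(DomainConstraintState *r)
{
    DomainConstraintState *newr = makeNode(DomainConstraintState);

    /* flat-copy the cached node ... */
    newr->constrainttype = r->constrainttype;
    newr->name = r->name;
    newr->check_expr = r->check_expr;
    /* ... and build a fresh, caller-owned expression evaluation tree */
    newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);

    return newr;
}
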
     130             : 
     131             : /* Private information to support comparisons of enum values */
     132             : typedef struct
     133             : {
     134             :     Oid         enum_oid;       /* OID of one enum value */
     135             :     float4      sort_order;     /* its sort position */
     136             : } EnumItem;
     137             : 
     138             : typedef struct TypeCacheEnumData
     139             : {
     140             :     Oid         bitmap_base;    /* OID corresponding to bit 0 of bitmapset */
     141             :     Bitmapset  *sorted_values;  /* Set of OIDs known to be in order */
     142             :     int         num_values;     /* total number of values in enum */
     143             :     EnumItem    enum_values[FLEXIBLE_ARRAY_MEMBER];
     144             : } TypeCacheEnumData;
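
The bitmap_base / sorted_values pair supports a cheap "is this value in the known-sorted subset?" test: bit 0 of the bitmapset stands for bitmap_base, so OIDs are offset before the lookup. A minimal sketch with a hypothetical helper name; the real consumer is compare_values_of_enum(), outside this excerpt:

static bool
enum_value_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
{
    if (arg < enumdata->bitmap_base)
        return false;           /* below the bitmap's range */
    return bms_is_member((int) (arg - enumdata->bitmap_base),
                         enumdata->sorted_values);
}
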
     145             : 
     146             : /*
     147             :  * We use a separate table for storing the definitions of non-anonymous
     148             :  * record types.  Once defined, a record type will be remembered for the
     149             :  * life of the backend.  Subsequent uses of the "same" record type (where
     150             :  * sameness means equalTupleDescs) will refer to the existing table entry.
     151             :  *
     152             :  * Stored record types are remembered in a linear array of TupleDescs,
     153             :  * which can be indexed quickly with the assigned typmod.  There is also
     154             :  * a hash table to speed searches for matching TupleDescs.
     155             :  */
     156             : 
     157             : typedef struct RecordCacheEntry
     158             : {
     159             :     TupleDesc   tupdesc;
     160             : } RecordCacheEntry;
     161             : 
     162             : /*
     163             :  * To deal with non-anonymous record types that are exchanged by backends
     164             :  * involved in a parallel query, we also need a shared version of the above.
     165             :  */
     166             : struct SharedRecordTypmodRegistry
     167             : {
     168             :     /* A hash table for finding a matching TupleDesc. */
     169             :     dshash_table_handle record_table_handle;
     170             :     /* A hash table for finding a TupleDesc by typmod. */
     171             :     dshash_table_handle typmod_table_handle;
     172             :     /* A source of new record typmod numbers. */
     173             :     pg_atomic_uint32 next_typmod;
     174             : };
     175             : 
     176             : /*
     177             :  * When using shared tuple descriptors as hash table keys we need a way to be
     178             :  * able to search for an equal shared TupleDesc using a backend-local
     179             :  * TupleDesc.  So we use this type which can hold either, and hash and compare
     180             :  * functions that know how to handle both.
     181             :  */
     182             : typedef struct SharedRecordTableKey
     183             : {
     184             :     union
     185             :     {
     186             :         TupleDesc   local_tupdesc;
     187             :         dsa_pointer shared_tupdesc;
     188             :     }           u;
     189             :     bool        shared;
     190             : } SharedRecordTableKey;
     191             : 
     192             : /*
     193             :  * The shared version of RecordCacheEntry.  This lets us look up a typmod
     194             :  * using a TupleDesc which may be in local or shared memory.
     195             :  */
     196             : typedef struct SharedRecordTableEntry
     197             : {
     198             :     SharedRecordTableKey key;
     199             : } SharedRecordTableEntry;
     200             : 
     201             : /*
     202             :  * An entry in SharedRecordTypmodRegistry's typmod table.  This lets us look
     203             :  * up a TupleDesc in shared memory using a typmod.
     204             :  */
     205             : typedef struct SharedTypmodTableEntry
     206             : {
     207             :     uint32      typmod;
     208             :     dsa_pointer shared_tupdesc;
     209             : } SharedTypmodTableEntry;
     210             : 
     211             : /*
     212             :  * A comparator function for SharedRecordTableKey.
     213             :  */
     214             : static int
     215          16 : shared_record_table_compare(const void *a, const void *b, size_t size,
     216             :                             void *arg)
     217             : {
     218          16 :     dsa_area   *area = (dsa_area *) arg;
     219          16 :     SharedRecordTableKey *k1 = (SharedRecordTableKey *) a;
     220          16 :     SharedRecordTableKey *k2 = (SharedRecordTableKey *) b;
     221             :     TupleDesc   t1;
     222             :     TupleDesc   t2;
     223             : 
     224          16 :     if (k1->shared)
     225           0 :         t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
     226             :     else
     227          16 :         t1 = k1->u.local_tupdesc;
     228             : 
     229          16 :     if (k2->shared)
     230          16 :         t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
     231             :     else
     232           0 :         t2 = k2->u.local_tupdesc;
     233             : 
     234          16 :     return equalTupleDescs(t1, t2) ? 0 : 1;
     235             : }
     236             : 
     237             : /*
     238             :  * A hash function for SharedRecordTableKey.
     239             :  */
     240             : static uint32
     241          72 : shared_record_table_hash(const void *a, size_t size, void *arg)
     242             : {
     243          72 :     dsa_area   *area = (dsa_area *) arg;
     244          72 :     SharedRecordTableKey *k = (SharedRecordTableKey *) a;
     245             :     TupleDesc   t;
     246             : 
     247          72 :     if (k->shared)
     248           0 :         t = (TupleDesc) dsa_get_address(area, k->u.shared_tupdesc);
     249             :     else
     250          72 :         t = k->u.local_tupdesc;
     251             : 
     252          72 :     return hashTupleDesc(t);
     253             : }
     254             : 
     255             : /* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
     256             : static const dshash_parameters srtr_record_table_params = {
     257             :     sizeof(SharedRecordTableKey),   /* unused */
     258             :     sizeof(SharedRecordTableEntry),
     259             :     shared_record_table_compare,
     260             :     shared_record_table_hash,
     261             :     LWTRANCHE_PER_SESSION_RECORD_TYPE
     262             : };
     263             : 
     264             : /* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
     265             : static const dshash_parameters srtr_typmod_table_params = {
     266             :     sizeof(uint32),
     267             :     sizeof(SharedTypmodTableEntry),
     268             :     dshash_memcmp,
     269             :     dshash_memhash,
     270             :     LWTRANCHE_PER_SESSION_RECORD_TYPMOD
     271             : };
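
Both parameter structs are handed to dshash_create() along with the per-session DSA area as the user arg, which is how shared_record_table_compare() and shared_record_table_hash() above obtain the area needed to resolve dsa_pointers. A minimal sketch under that assumption; the real call sites are in SharedRecordTypmodRegistryInit()/Attach(), outside this excerpt, and the helper name is illustrative:

static void
create_shared_record_tables_sketch(dsa_area *area)
{
    /* arg = area so the key hash/compare callbacks can dsa_get_address() */
    dshash_table *record_table =
        dshash_create(area, &srtr_record_table_params, area);
    dshash_table *typmod_table =
        dshash_create(area, &srtr_typmod_table_params, area);

    (void) record_table;        /* handles would then be published in a */
    (void) typmod_table;        /* SharedRecordTypmodRegistry */
}
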
     272             : 
     273             : /* hashtable for recognizing registered record types */
     274             : static HTAB *RecordCacheHash = NULL;
     275             : 
     276             : /* arrays of info about registered record types, indexed by assigned typmod */
     277             : static TupleDesc *RecordCacheArray = NULL;
     278             : static uint64 *RecordIdentifierArray = NULL;
     279             : static int32 RecordCacheArrayLen = 0;   /* allocated length of above arrays */
     280             : static int32 NextRecordTypmod = 0;  /* number of entries used */
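
A typmod handed out by this registry is simply an index into RecordCacheArray, so resolving a known typmod back to its TupleDesc is an array lookup. A minimal sketch with a hypothetical helper name; the real lookup, including on-demand fetching from the shared registry, is lookup_rowtype_tupdesc_internal(), outside this excerpt:

static TupleDesc
record_typmod_to_tupdesc_sketch(int32 typmod)
{
    if (typmod >= 0 && typmod < NextRecordTypmod &&
        RecordCacheArray[typmod] != NULL)
        return RecordCacheArray[typmod];

    return NULL;                /* not registered in this backend */
}
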
     281             : 
     282             : /*
     283             :  * Process-wide counter for generating unique tupledesc identifiers.
     284             :  * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
     285             :  * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
     286             :  */
     287             : static uint64 tupledesc_id_counter = INVALID_TUPLEDESC_IDENTIFIER;
     288             : 
     289             : static void load_typcache_tupdesc(TypeCacheEntry *typentry);
     290             : static void load_rangetype_info(TypeCacheEntry *typentry);
     291             : static void load_multirangetype_info(TypeCacheEntry *typentry);
     292             : static void load_domaintype_info(TypeCacheEntry *typentry);
     293             : static int  dcs_cmp(const void *a, const void *b);
     294             : static void decr_dcc_refcount(DomainConstraintCache *dcc);
     295             : static void dccref_deletion_callback(void *arg);
     296             : static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
     297             : static bool array_element_has_equality(TypeCacheEntry *typentry);
     298             : static bool array_element_has_compare(TypeCacheEntry *typentry);
     299             : static bool array_element_has_hashing(TypeCacheEntry *typentry);
     300             : static bool array_element_has_extended_hashing(TypeCacheEntry *typentry);
     301             : static void cache_array_element_properties(TypeCacheEntry *typentry);
     302             : static bool record_fields_have_equality(TypeCacheEntry *typentry);
     303             : static bool record_fields_have_compare(TypeCacheEntry *typentry);
     304             : static bool record_fields_have_hashing(TypeCacheEntry *typentry);
     305             : static bool record_fields_have_extended_hashing(TypeCacheEntry *typentry);
     306             : static void cache_record_field_properties(TypeCacheEntry *typentry);
     307             : static bool range_element_has_hashing(TypeCacheEntry *typentry);
     308             : static bool range_element_has_extended_hashing(TypeCacheEntry *typentry);
     309             : static void cache_range_element_properties(TypeCacheEntry *typentry);
     310             : static bool multirange_element_has_hashing(TypeCacheEntry *typentry);
     311             : static bool multirange_element_has_extended_hashing(TypeCacheEntry *typentry);
     312             : static void cache_multirange_element_properties(TypeCacheEntry *typentry);
     313             : static void TypeCacheRelCallback(Datum arg, Oid relid);
     314             : static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue);
     315             : static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
     316             : static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
     317             : static void load_enum_cache_data(TypeCacheEntry *tcache);
     318             : static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
     319             : static int  enum_oid_cmp(const void *left, const void *right);
     320             : static void shared_record_typmod_registry_detach(dsm_segment *segment,
     321             :                                                  Datum datum);
     322             : static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc);
     323             : static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
     324             :                                    uint32 typmod);
     325             : 
     326             : 
     327             : /*
     328             :  * lookup_type_cache
     329             :  *
     330             :  * Fetch the type cache entry for the specified datatype, and make sure that
     331             :  * all the fields requested by bits in 'flags' are valid.
     332             :  *
     333             :  * The result is never NULL --- we will ereport() if the passed type OID is
     334             :  * invalid.  Note however that we may fail to find one or more of the
     335             :  * values requested by 'flags'; the caller needs to check whether the fields
     336             :  * are InvalidOid or not.
     337             :  */
     338             : TypeCacheEntry *
     339      893540 : lookup_type_cache(Oid type_id, int flags)
     340             : {
     341             :     TypeCacheEntry *typentry;
     342             :     bool        found;
     343             : 
     344      893540 :     if (TypeCacheHash == NULL)
     345             :     {
     346             :         /* First time through: initialize the hash table */
     347             :         HASHCTL     ctl;
     348             : 
     349        4436 :         ctl.keysize = sizeof(Oid);
     350        4436 :         ctl.entrysize = sizeof(TypeCacheEntry);
     351        4436 :         TypeCacheHash = hash_create("Type information cache", 64,
     352             :                                     &ctl, HASH_ELEM | HASH_BLOBS);
     353             : 
     354             :         /* Also set up callbacks for SI invalidations */
     355        4436 :         CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
     356        4436 :         CacheRegisterSyscacheCallback(TYPEOID, TypeCacheTypCallback, (Datum) 0);
     357        4436 :         CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
     358        4436 :         CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);
     359             : 
     360             :         /* Also make sure CacheMemoryContext exists */
     361        4436 :         if (!CacheMemoryContext)
     362           0 :             CreateCacheMemoryContext();
     363             :     }
     364             : 
     365             :     /* Try to look up an existing entry */
     366      893540 :     typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
     367             :                                               (void *) &type_id,
     368             :                                               HASH_FIND, NULL);
     369      893540 :     if (typentry == NULL)
     370             :     {
     371             :         /*
     372             :          * If we didn't find one, we want to make one.  But first look up the
     373             :          * pg_type row, just to make sure we don't make a cache entry for an
     374             :          * invalid type OID.  If the type OID is not valid, present a
     375             :          * user-facing error, since some code paths such as domain_in() allow
     376             :          * this function to be reached with a user-supplied OID.
     377             :          */
     378             :         HeapTuple   tp;
     379             :         Form_pg_type typtup;
     380             : 
     381       32028 :         tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
     382       32028 :         if (!HeapTupleIsValid(tp))
     383           0 :             ereport(ERROR,
     384             :                     (errcode(ERRCODE_UNDEFINED_OBJECT),
     385             :                      errmsg("type with OID %u does not exist", type_id)));
     386       32028 :         typtup = (Form_pg_type) GETSTRUCT(tp);
     387       32028 :         if (!typtup->typisdefined)
     388           0 :             ereport(ERROR,
     389             :                     (errcode(ERRCODE_UNDEFINED_OBJECT),
     390             :                      errmsg("type \"%s\" is only a shell",
     391             :                             NameStr(typtup->typname))));
     392             : 
     393             :         /* Now make the typcache entry */
     394       32028 :         typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
     395             :                                                   (void *) &type_id,
     396             :                                                   HASH_ENTER, &found);
     397             :         Assert(!found);         /* it wasn't there a moment ago */
     398             : 
     399     1985736 :         MemSet(typentry, 0, sizeof(TypeCacheEntry));
     400             : 
     401             :         /* These fields can never change, by definition */
     402       32028 :         typentry->type_id = type_id;
     403       32028 :         typentry->type_id_hash = GetSysCacheHashValue1(TYPEOID,
     404             :                                                        ObjectIdGetDatum(type_id));
     405             : 
     406             :         /* Keep this part in sync with the code below */
     407       32028 :         typentry->typlen = typtup->typlen;
     408       32028 :         typentry->typbyval = typtup->typbyval;
     409       32028 :         typentry->typalign = typtup->typalign;
     410       32028 :         typentry->typstorage = typtup->typstorage;
     411       32028 :         typentry->typtype = typtup->typtype;
     412       32028 :         typentry->typrelid = typtup->typrelid;
     413       32028 :         typentry->typsubscript = typtup->typsubscript;
     414       32028 :         typentry->typelem = typtup->typelem;
     415       32028 :         typentry->typcollation = typtup->typcollation;
     416       32028 :         typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
     417             : 
     418             :         /* If it's a domain, immediately thread it into the domain cache list */
     419       32028 :         if (typentry->typtype == TYPTYPE_DOMAIN)
     420             :         {
     421        2542 :             typentry->nextDomain = firstDomainTypeEntry;
     422        2542 :             firstDomainTypeEntry = typentry;
     423             :         }
     424             : 
     425       32028 :         ReleaseSysCache(tp);
     426             :     }
     427      861512 :     else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
     428             :     {
     429             :         /*
     430             :          * We have an entry, but its pg_type row got changed, so reload the
     431             :          * data obtained directly from pg_type.
     432             :          */
     433             :         HeapTuple   tp;
     434             :         Form_pg_type typtup;
     435             : 
     436         280 :         tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
     437         280 :         if (!HeapTupleIsValid(tp))
     438           0 :             ereport(ERROR,
     439             :                     (errcode(ERRCODE_UNDEFINED_OBJECT),
     440             :                      errmsg("type with OID %u does not exist", type_id)));
     441         280 :         typtup = (Form_pg_type) GETSTRUCT(tp);
     442         280 :         if (!typtup->typisdefined)
     443           0 :             ereport(ERROR,
     444             :                     (errcode(ERRCODE_UNDEFINED_OBJECT),
     445             :                      errmsg("type \"%s\" is only a shell",
     446             :                             NameStr(typtup->typname))));
     447             : 
     448             :         /*
     449             :          * Keep this part in sync with the code above.  Many of these fields
     450             :          * shouldn't ever change, particularly typtype, but copy 'em anyway.
     451             :          */
     452         280 :         typentry->typlen = typtup->typlen;
     453         280 :         typentry->typbyval = typtup->typbyval;
     454         280 :         typentry->typalign = typtup->typalign;
     455         280 :         typentry->typstorage = typtup->typstorage;
     456         280 :         typentry->typtype = typtup->typtype;
     457         280 :         typentry->typrelid = typtup->typrelid;
     458         280 :         typentry->typsubscript = typtup->typsubscript;
     459         280 :         typentry->typelem = typtup->typelem;
     460         280 :         typentry->typcollation = typtup->typcollation;
     461         280 :         typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
     462             : 
     463         280 :         ReleaseSysCache(tp);
     464             :     }
     465             : 
     466             :     /*
     467             :      * Look up opclasses if we haven't already and any dependent info is
     468             :      * requested.
     469             :      */
     470      893540 :     if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR | TYPECACHE_GT_OPR |
     471             :                   TYPECACHE_CMP_PROC |
     472             :                   TYPECACHE_EQ_OPR_FINFO | TYPECACHE_CMP_PROC_FINFO |
     473      584348 :                   TYPECACHE_BTREE_OPFAMILY)) &&
     474      584348 :         !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
     475             :     {
     476             :         Oid         opclass;
     477             : 
     478       28402 :         opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
     479       28402 :         if (OidIsValid(opclass))
     480             :         {
     481       27014 :             typentry->btree_opf = get_opclass_family(opclass);
     482       27014 :             typentry->btree_opintype = get_opclass_input_type(opclass);
     483             :         }
     484             :         else
     485             :         {
     486        1388 :             typentry->btree_opf = typentry->btree_opintype = InvalidOid;
     487             :         }
     488             : 
     489             :         /*
     490             :          * Reset information derived from btree opclass.  Note in particular
     491             :          * that we'll redetermine the eq_opr even if we previously found one;
     492             :          * this matters in case a btree opclass has been added to a type that
     493             :          * previously had only a hash opclass.
     494             :          */
     495       28402 :         typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
     496             :                              TCFLAGS_CHECKED_LT_OPR |
     497             :                              TCFLAGS_CHECKED_GT_OPR |
     498             :                              TCFLAGS_CHECKED_CMP_PROC);
     499       28402 :         typentry->flags |= TCFLAGS_CHECKED_BTREE_OPCLASS;
     500             :     }
     501             : 
     502             :     /*
     503             :      * If we need to look up equality operator, and there's no btree opclass,
     504             :      * force lookup of hash opclass.
     505             :      */
     506      893540 :     if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
     507      568726 :         !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
     508       28244 :         typentry->btree_opf == InvalidOid)
     509        1388 :         flags |= TYPECACHE_HASH_OPFAMILY;
     510             : 
     511      893540 :     if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO |
     512             :                   TYPECACHE_HASH_EXTENDED_PROC |
     513             :                   TYPECACHE_HASH_EXTENDED_PROC_FINFO |
     514      248598 :                   TYPECACHE_HASH_OPFAMILY)) &&
     515      248598 :         !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
     516             :     {
     517             :         Oid         opclass;
     518             : 
     519       17078 :         opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
     520       17078 :         if (OidIsValid(opclass))
     521             :         {
     522       16908 :             typentry->hash_opf = get_opclass_family(opclass);
     523       16908 :             typentry->hash_opintype = get_opclass_input_type(opclass);
     524             :         }
     525             :         else
     526             :         {
     527         170 :             typentry->hash_opf = typentry->hash_opintype = InvalidOid;
     528             :         }
     529             : 
     530             :         /*
     531             :          * Reset information derived from hash opclass.  We do *not* reset the
     532             :          * eq_opr; if we already found one from the btree opclass, that
     533             :          * decision is still good.
     534             :          */
     535       17078 :         typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
     536             :                              TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
     537       17078 :         typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
     538             :     }
     539             : 
     540             :     /*
     541             :      * Look for requested operators and functions, if we haven't already.
     542             :      */
     543      893540 :     if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
     544      568726 :         !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
     545             :     {
     546       28244 :         Oid         eq_opr = InvalidOid;
     547             : 
     548       28244 :         if (typentry->btree_opf != InvalidOid)
     549       26856 :             eq_opr = get_opfamily_member(typentry->btree_opf,
     550             :                                          typentry->btree_opintype,
     551             :                                          typentry->btree_opintype,
     552             :                                          BTEqualStrategyNumber);
     553       28244 :         if (eq_opr == InvalidOid &&
     554        1388 :             typentry->hash_opf != InvalidOid)
     555        1268 :             eq_opr = get_opfamily_member(typentry->hash_opf,
     556             :                                          typentry->hash_opintype,
     557             :                                          typentry->hash_opintype,
     558             :                                          HTEqualStrategyNumber);
     559             : 
     560             :         /*
     561             :          * If the proposed equality operator is array_eq or record_eq, check
     562             :          * to see if the element type or column types support equality.  If
     563             :          * not, array_eq or record_eq would fail at runtime, so we don't want
     564             :          * to report that the type has equality.  (We can omit similar
     565             :          * checking for ranges and multiranges because ranges can't be created
     566             :          * in the first place unless their subtypes support equality.)
     567             :          */
     568       28244 :         if (eq_opr == ARRAY_EQ_OP &&
     569        4892 :             !array_element_has_equality(typentry))
     570        1058 :             eq_opr = InvalidOid;
     571       27186 :         else if (eq_opr == RECORD_EQ_OP &&
     572         658 :                  !record_fields_have_equality(typentry))
     573         536 :             eq_opr = InvalidOid;
     574             : 
     575             :         /* Force update of eq_opr_finfo only if we're changing state */
     576       28244 :         if (typentry->eq_opr != eq_opr)
     577       26082 :             typentry->eq_opr_finfo.fn_oid = InvalidOid;
     578             : 
     579       28244 :         typentry->eq_opr = eq_opr;
     580             : 
     581             :         /*
     582             :          * Reset info about hash functions whenever we pick up new info about
     583             :          * equality operator.  This is so we can ensure that the hash
     584             :          * functions match the operator.
     585             :          */
     586       28244 :         typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
     587             :                              TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
     588       28244 :         typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
     589             :     }
     590      893540 :     if ((flags & TYPECACHE_LT_OPR) &&
     591      428282 :         !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
     592             :     {
     593       22476 :         Oid         lt_opr = InvalidOid;
     594             : 
     595       22476 :         if (typentry->btree_opf != InvalidOid)
     596       21394 :             lt_opr = get_opfamily_member(typentry->btree_opf,
     597             :                                          typentry->btree_opintype,
     598             :                                          typentry->btree_opintype,
     599             :                                          BTLessStrategyNumber);
     600             : 
     601             :         /*
     602             :          * As above, make sure array_cmp or record_cmp will succeed; but again
     603             :          * we need no special check for ranges or multiranges.
     604             :          */
     605       22476 :         if (lt_opr == ARRAY_LT_OP &&
     606        4328 :             !array_element_has_compare(typentry))
     607        1592 :             lt_opr = InvalidOid;
     608       20884 :         else if (lt_opr == RECORD_LT_OP &&
     609          70 :                  !record_fields_have_compare(typentry))
     610           8 :             lt_opr = InvalidOid;
     611             : 
     612       22476 :         typentry->lt_opr = lt_opr;
     613       22476 :         typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
     614             :     }
     615      893540 :     if ((flags & TYPECACHE_GT_OPR) &&
     616      425542 :         !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
     617             :     {
     618       22452 :         Oid         gt_opr = InvalidOid;
     619             : 
     620       22452 :         if (typentry->btree_opf != InvalidOid)
     621       21370 :             gt_opr = get_opfamily_member(typentry->btree_opf,
     622             :                                          typentry->btree_opintype,
     623             :                                          typentry->btree_opintype,
     624             :                                          BTGreaterStrategyNumber);
     625             : 
     626             :         /*
     627             :          * As above, make sure array_cmp or record_cmp will succeed; but again
     628             :          * we need no special check for ranges or multiranges.
     629             :          */
     630       22452 :         if (gt_opr == ARRAY_GT_OP &&
     631        4322 :             !array_element_has_compare(typentry))
     632        1592 :             gt_opr = InvalidOid;
     633       20860 :         else if (gt_opr == RECORD_GT_OP &&
     634          70 :                  !record_fields_have_compare(typentry))
     635           8 :             gt_opr = InvalidOid;
     636             : 
     637       22452 :         typentry->gt_opr = gt_opr;
     638       22452 :         typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
     639             :     }
     640      893540 :     if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
     641       49286 :         !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
     642             :     {
     643        6292 :         Oid         cmp_proc = InvalidOid;
     644             : 
     645        6292 :         if (typentry->btree_opf != InvalidOid)
     646        5748 :             cmp_proc = get_opfamily_proc(typentry->btree_opf,
     647             :                                          typentry->btree_opintype,
     648             :                                          typentry->btree_opintype,
     649             :                                          BTORDER_PROC);
     650             : 
     651             :         /*
     652             :          * As above, make sure array_cmp or record_cmp will succeed; but again
     653             :          * we need no special check for ranges or multiranges.
     654             :          */
     655        6292 :         if (cmp_proc == F_BTARRAYCMP &&
     656        1100 :             !array_element_has_compare(typentry))
     657         524 :             cmp_proc = InvalidOid;
     658        5768 :         else if (cmp_proc == F_BTRECORDCMP &&
     659         574 :                  !record_fields_have_compare(typentry))
     660         524 :             cmp_proc = InvalidOid;
     661             : 
     662             :         /* Force update of cmp_proc_finfo only if we're changing state */
     663        6292 :         if (typentry->cmp_proc != cmp_proc)
     664        4686 :             typentry->cmp_proc_finfo.fn_oid = InvalidOid;
     665             : 
     666        6292 :         typentry->cmp_proc = cmp_proc;
     667        6292 :         typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
     668             :     }
     669      893540 :     if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) &&
     670      247752 :         !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
     671             :     {
     672       16320 :         Oid         hash_proc = InvalidOid;
     673             : 
     674             :         /*
     675             :          * We insist that the eq_opr, if one has been determined, match the
     676             :          * hash opclass; else report there is no hash function.
     677             :          */
     678       16320 :         if (typentry->hash_opf != InvalidOid &&
     679       31304 :             (!OidIsValid(typentry->eq_opr) ||
     680       15106 :              typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
     681             :                                                      typentry->hash_opintype,
     682             :                                                      typentry->hash_opintype,
     683             :                                                      HTEqualStrategyNumber)))
     684       16198 :             hash_proc = get_opfamily_proc(typentry->hash_opf,
     685             :                                           typentry->hash_opintype,
     686             :                                           typentry->hash_opintype,
     687             :                                           HASHSTANDARD_PROC);
     688             : 
     689             :         /*
     690             :          * As above, make sure hash_array, hash_record, or hash_range will
     691             :          * succeed.
     692             :          */
     693       16320 :         if (hash_proc == F_HASH_ARRAY &&
     694        1128 :             !array_element_has_hashing(typentry))
     695         532 :             hash_proc = InvalidOid;
     696       15788 :         else if (hash_proc == F_HASH_RECORD &&
     697         650 :                  !record_fields_have_hashing(typentry))
     698         548 :             hash_proc = InvalidOid;
     699       15240 :         else if (hash_proc == F_HASH_RANGE &&
     700          12 :                  !range_element_has_hashing(typentry))
     701           4 :             hash_proc = InvalidOid;
     702             : 
     703             :         /*
     704             :          * Likewise for hash_multirange.
     705             :          */
     706       16320 :         if (hash_proc == F_HASH_MULTIRANGE &&
     707           8 :             !multirange_element_has_hashing(typentry))
     708           4 :             hash_proc = InvalidOid;
     709             : 
     710             :         /* Force update of hash_proc_finfo only if we're changing state */
     711       16320 :         if (typentry->hash_proc != hash_proc)
     712       14708 :             typentry->hash_proc_finfo.fn_oid = InvalidOid;
     713             : 
     714       16320 :         typentry->hash_proc = hash_proc;
     715       16320 :         typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
     716             :     }
     717      893540 :     if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
     718       18964 :                   TYPECACHE_HASH_EXTENDED_PROC_FINFO)) &&
     719       18964 :         !(typentry->flags & TCFLAGS_CHECKED_HASH_EXTENDED_PROC))
     720             :     {
     721        6086 :         Oid         hash_extended_proc = InvalidOid;
     722             : 
     723             :         /*
     724             :          * We insist that the eq_opr, if one has been determined, match the
     725             :          * hash opclass; else report there is no hash function.
     726             :          */
     727        6086 :         if (typentry->hash_opf != InvalidOid &&
     728       11056 :             (!OidIsValid(typentry->eq_opr) ||
     729        4994 :              typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
     730             :                                                      typentry->hash_opintype,
     731             :                                                      typentry->hash_opintype,
     732             :                                                      HTEqualStrategyNumber)))
     733        6062 :             hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
     734             :                                                    typentry->hash_opintype,
     735             :                                                    typentry->hash_opintype,
     736             :                                                    HASHEXTENDED_PROC);
     737             : 
     738             :         /*
     739             :          * As above, make sure hash_array_extended, hash_record_extended, or
     740             :          * hash_range_extended will succeed.
     741             :          */
     742        6086 :         if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
     743        1060 :             !array_element_has_extended_hashing(typentry))
     744         524 :             hash_extended_proc = InvalidOid;
     745        5562 :         else if (hash_extended_proc == F_HASH_RECORD_EXTENDED &&
     746         536 :                  !record_fields_have_extended_hashing(typentry))
     747         528 :             hash_extended_proc = InvalidOid;
     748        5034 :         else if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
     749           0 :                  !range_element_has_extended_hashing(typentry))
     750           0 :             hash_extended_proc = InvalidOid;
     751             : 
     752             :         /*
     753             :          * Likewise for hash_multirange_extended.
     754             :          */
     755        6086 :         if (hash_extended_proc == F_HASH_MULTIRANGE_EXTENDED &&
     756           0 :             !multirange_element_has_extended_hashing(typentry))
     757           0 :             hash_extended_proc = InvalidOid;
     758             : 
     759             :         /* Force update of proc finfo only if we're changing state */
     760        6086 :         if (typentry->hash_extended_proc != hash_extended_proc)
     761        5008 :             typentry->hash_extended_proc_finfo.fn_oid = InvalidOid;
     762             : 
     763        6086 :         typentry->hash_extended_proc = hash_extended_proc;
     764        6086 :         typentry->flags |= TCFLAGS_CHECKED_HASH_EXTENDED_PROC;
     765             :     }
     766             : 
     767             :     /*
     768             :      * Set up fmgr lookup info as requested
     769             :      *
     770             :      * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
     771             :      * which is not quite right (they're really in the hash table's private
     772             :      * memory context) but this will do for our purposes.
     773             :      *
     774             :      * Note: the code above avoids invalidating the finfo structs unless the
     775             :      * referenced operator/function OID actually changes.  This is to prevent
     776             :      * unnecessary leakage of any subsidiary data attached to an finfo, since
     777             :      * that would cause session-lifespan memory leaks.
     778             :      */
     779      893540 :     if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
     780        5160 :         typentry->eq_opr_finfo.fn_oid == InvalidOid &&
     781        1172 :         typentry->eq_opr != InvalidOid)
     782             :     {
     783             :         Oid         eq_opr_func;
     784             : 
     785        1168 :         eq_opr_func = get_opcode(typentry->eq_opr);
     786        1168 :         if (eq_opr_func != InvalidOid)
     787        1168 :             fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
     788             :                           CacheMemoryContext);
     789             :     }
     790      893540 :     if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
     791       28006 :         typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
     792        9744 :         typentry->cmp_proc != InvalidOid)
     793             :     {
     794        2370 :         fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
     795             :                       CacheMemoryContext);
     796             :     }
     797      893540 :     if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
     798       21792 :         typentry->hash_proc_finfo.fn_oid == InvalidOid &&
     799        3272 :         typentry->hash_proc != InvalidOid)
     800             :     {
     801        2728 :         fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
     802             :                       CacheMemoryContext);
     803             :     }
     804      893540 :     if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) &&
     805          68 :         typentry->hash_extended_proc_finfo.fn_oid == InvalidOid &&
     806          20 :         typentry->hash_extended_proc != InvalidOid)
     807             :     {
     808          12 :         fmgr_info_cxt(typentry->hash_extended_proc,
     809             :                       &typentry->hash_extended_proc_finfo,
     810             :                       CacheMemoryContext);
     811             :     }
     812             : 
     813             :     /*
     814             :      * If it's a composite type (row type), get tupdesc if requested
     815             :      */
     816      893540 :     if ((flags & TYPECACHE_TUPDESC) &&
     817       92420 :         typentry->tupDesc == NULL &&
     818        3540 :         typentry->typtype == TYPTYPE_COMPOSITE)
     819             :     {
     820        3426 :         load_typcache_tupdesc(typentry);
     821             :     }
     822             : 
     823             :     /*
     824             :      * If requested, get information about a range type
     825             :      *
     826             :      * This includes making sure that the basic info about the range element
     827             :      * type is up-to-date.
     828             :      */
     829      893540 :     if ((flags & TYPECACHE_RANGE_INFO) &&
     830        8852 :         typentry->typtype == TYPTYPE_RANGE)
     831             :     {
     832        8852 :         if (typentry->rngelemtype == NULL)
     833         330 :             load_rangetype_info(typentry);
     834        8522 :         else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
     835           0 :             (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
     836             :     }
     837             : 
     838             :     /*
     839             :      * If requested, get information about a multirange type
     840             :      */
     841      893540 :     if ((flags & TYPECACHE_MULTIRANGE_INFO) &&
     842        5008 :         typentry->rngtype == NULL &&
     843         152 :         typentry->typtype == TYPTYPE_MULTIRANGE)
     844             :     {
     845         152 :         load_multirangetype_info(typentry);
     846             :     }
     847             : 
     848             :     /*
     849             :      * If requested, get information about a domain type
     850             :      */
     851      893540 :     if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
     852        7990 :         typentry->domainBaseType == InvalidOid &&
     853        4396 :         typentry->typtype == TYPTYPE_DOMAIN)
     854             :     {
     855        1108 :         typentry->domainBaseTypmod = -1;
     856        1108 :         typentry->domainBaseType =
     857        1108 :             getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
     858             :     }
     859      893540 :     if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
     860      145726 :         (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
     861        5272 :         typentry->typtype == TYPTYPE_DOMAIN)
     862             :     {
     863        3054 :         load_domaintype_info(typentry);
     864             :     }
     865             : 
     866      893540 :     return typentry;
     867             : }
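
A typical caller sketch, assuming a backend context: request the needed pieces of information via flags, then verify that each requested field actually came back valid, as the header comment of lookup_type_cache() requires. The function name and error text here are illustrative, not taken from this file:

static void
require_equality_operator(Oid type_id)
{
    TypeCacheEntry *typentry;

    typentry = lookup_type_cache(type_id, TYPECACHE_EQ_OPR);
    if (!OidIsValid(typentry->eq_opr))
        ereport(ERROR,
                (errcode(ERRCODE_UNDEFINED_FUNCTION),
                 errmsg("could not identify an equality operator for type %s",
                        format_type_be(type_id))));
}
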
     868             : 
     869             : /*
     870             :  * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
     871             :  */
     872             : static void
     873        3546 : load_typcache_tupdesc(TypeCacheEntry *typentry)
     874             : {
     875             :     Relation    rel;
     876             : 
     877        3546 :     if (!OidIsValid(typentry->typrelid)) /* should not happen */
     878           0 :         elog(ERROR, "invalid typrelid for composite type %u",
     879             :              typentry->type_id);
     880        3546 :     rel = relation_open(typentry->typrelid, AccessShareLock);
     881             :     Assert(rel->rd_rel->reltype == typentry->type_id);
     882             : 
     883             :     /*
     884             :      * Link to the tupdesc and increment its refcount (we assert it's a
     885             :      * refcounted descriptor).  We don't use IncrTupleDescRefCount() for this,
     886             :      * because the reference mustn't be entered in the current resource owner;
     887             :      * it can outlive the current query.
     888             :      */
     889        3546 :     typentry->tupDesc = RelationGetDescr(rel);
     890             : 
     891             :     Assert(typentry->tupDesc->tdrefcount > 0);
     892        3546 :     typentry->tupDesc->tdrefcount++;
     893             : 
     894             :     /*
     895             :      * In future, we could take some pains to not change tupDesc_identifier if
     896             :      * the tupdesc didn't really change; but for now it's not worth it.
     897             :      */
     898        3546 :     typentry->tupDesc_identifier = ++tupledesc_id_counter;
     899             : 
     900        3546 :     relation_close(rel, AccessShareLock);
     901        3546 : }
     902             : 
     903             : /*
     904             :  * load_rangetype_info --- helper routine to set up range type information
     905             :  */
     906             : static void
     907         330 : load_rangetype_info(TypeCacheEntry *typentry)
     908             : {
     909             :     Form_pg_range pg_range;
     910             :     HeapTuple   tup;
     911             :     Oid         subtypeOid;
     912             :     Oid         opclassOid;
     913             :     Oid         canonicalOid;
     914             :     Oid         subdiffOid;
     915             :     Oid         opfamilyOid;
     916             :     Oid         opcintype;
     917             :     Oid         cmpFnOid;
     918             : 
     919             :     /* get information from pg_range */
     920         330 :     tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id));
     921             :     /* should not fail, since we already checked typtype ... */
     922         330 :     if (!HeapTupleIsValid(tup))
     923           0 :         elog(ERROR, "cache lookup failed for range type %u",
     924             :              typentry->type_id);
     925         330 :     pg_range = (Form_pg_range) GETSTRUCT(tup);
     926             : 
     927         330 :     subtypeOid = pg_range->rngsubtype;
     928         330 :     typentry->rng_collation = pg_range->rngcollation;
     929         330 :     opclassOid = pg_range->rngsubopc;
     930         330 :     canonicalOid = pg_range->rngcanonical;
     931         330 :     subdiffOid = pg_range->rngsubdiff;
     932             : 
     933         330 :     ReleaseSysCache(tup);
     934             : 
     935             :     /* get opclass properties and look up the comparison function */
     936         330 :     opfamilyOid = get_opclass_family(opclassOid);
     937         330 :     opcintype = get_opclass_input_type(opclassOid);
     938             : 
     939         330 :     cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
     940             :                                  BTORDER_PROC);
     941         330 :     if (!RegProcedureIsValid(cmpFnOid))
     942           0 :         elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
     943             :              BTORDER_PROC, opcintype, opcintype, opfamilyOid);
     944             : 
     945             :     /* set up cached fmgrinfo structs */
     946         330 :     fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
     947             :                   CacheMemoryContext);
     948         330 :     if (OidIsValid(canonicalOid))
     949         142 :         fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
     950             :                       CacheMemoryContext);
     951         330 :     if (OidIsValid(subdiffOid))
     952         234 :         fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
     953             :                       CacheMemoryContext);
     954             : 
     955             :     /* Lastly, set up link to the element type --- this marks data valid */
     956         330 :     typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
     957         330 : }
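
/*
 * Illustrative usage sketch (hypothetical helper, not part of typcache.c):
 * after requesting TYPECACHE_RANGE_INFO, callers can compare two subtype
 * values through the fmgr info cached above, in the spirit of the range
 * operators in rangetypes.c.  The helper name is made up.
 */
#include "postgres.h"
#include "fmgr.h"
#include "utils/typcache.h"

static int
compare_range_subtype_values(TypeCacheEntry *typcache, Datum a, Datum b)
{
    /* rng_cmp_proc_finfo and rng_collation were filled by load_rangetype_info() */
    return DatumGetInt32(FunctionCall2Coll(&typcache->rng_cmp_proc_finfo,
                                           typcache->rng_collation,
                                           a, b));
}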
     958             : 
     959             : /*
     960             :  * load_multirangetype_info --- helper routine to set up multirange type
     961             :  * information
     962             :  */
     963             : static void
     964         152 : load_multirangetype_info(TypeCacheEntry *typentry)
     965             : {
     966             :     Oid         rangetypeOid;
     967             : 
     968         152 :     rangetypeOid = get_multirange_range(typentry->type_id);
     969         152 :     if (!OidIsValid(rangetypeOid))
     970           0 :         elog(ERROR, "cache lookup failed for multirange type %u",
     971             :              typentry->type_id);
     972             : 
     973         152 :     typentry->rngtype = lookup_type_cache(rangetypeOid, TYPECACHE_RANGE_INFO);
     974         152 : }
     975             : 
     976             : /*
     977             :  * load_domaintype_info --- helper routine to set up domain constraint info
     978             :  *
     979             :  * Note: we assume we're called in a relatively short-lived context, so it's
     980             :  * okay to leak data into the current context while scanning pg_constraint.
     981             :  * We build the new DomainConstraintCache data in a context underneath
     982             :  * CurrentMemoryContext, and reparent it under CacheMemoryContext when
     983             :  * complete.
     984             :  */
     985             : static void
     986        3054 : load_domaintype_info(TypeCacheEntry *typentry)
     987             : {
     988        3054 :     Oid         typeOid = typentry->type_id;
     989             :     DomainConstraintCache *dcc;
     990        3054 :     bool        notNull = false;
     991             :     DomainConstraintState **ccons;
     992             :     int         cconslen;
     993             :     Relation    conRel;
     994             :     MemoryContext oldcxt;
     995             : 
     996             :     /*
     997             :      * If we're here, any existing constraint info is stale, so release it.
     998             :      * For safety, be sure to null the link before trying to delete the data.
     999             :      */
    1000        3054 :     if (typentry->domainData)
    1001             :     {
    1002         294 :         dcc = typentry->domainData;
    1003         294 :         typentry->domainData = NULL;
    1004         294 :         decr_dcc_refcount(dcc);
    1005             :     }
    1006             : 
    1007             :     /*
    1008             :      * We try to optimize the common case of no domain constraints, so don't
    1009             :      * create the dcc object and context until we find a constraint.  Likewise
    1010             :      * for the temp sorting array.
    1011             :      */
    1012        3054 :     dcc = NULL;
    1013        3054 :     ccons = NULL;
    1014        3054 :     cconslen = 0;
    1015             : 
    1016             :     /*
    1017             :      * Scan pg_constraint for relevant constraints.  We want to find
    1018             :      * constraints for not just this domain, but any ancestor domains, so the
    1019             :      * outer loop crawls up the domain stack.
    1020             :      */
    1021        3054 :     conRel = table_open(ConstraintRelationId, AccessShareLock);
    1022             : 
    1023             :     for (;;)
    1024        3074 :     {
    1025             :         HeapTuple   tup;
    1026             :         HeapTuple   conTup;
    1027             :         Form_pg_type typTup;
    1028        6128 :         int         nccons = 0;
    1029             :         ScanKeyData key[1];
    1030             :         SysScanDesc scan;
    1031             : 
    1032        6128 :         tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
    1033        6128 :         if (!HeapTupleIsValid(tup))
    1034           0 :             elog(ERROR, "cache lookup failed for type %u", typeOid);
    1035        6128 :         typTup = (Form_pg_type) GETSTRUCT(tup);
    1036             : 
    1037        6128 :         if (typTup->typtype != TYPTYPE_DOMAIN)
    1038             :         {
    1039             :             /* Not a domain, so done */
    1040        3054 :             ReleaseSysCache(tup);
    1041        3054 :             break;
    1042             :         }
    1043             : 
    1044             :         /* Test for NOT NULL Constraint */
    1045        3074 :         if (typTup->typnotnull)
    1046          54 :             notNull = true;
    1047             : 
    1048             :         /* Look for CHECK Constraints on this domain */
    1049        3074 :         ScanKeyInit(&key[0],
    1050             :                     Anum_pg_constraint_contypid,
    1051             :                     BTEqualStrategyNumber, F_OIDEQ,
    1052             :                     ObjectIdGetDatum(typeOid));
    1053             : 
    1054        3074 :         scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
    1055             :                                   NULL, 1, key);
    1056             : 
    1057        4538 :         while (HeapTupleIsValid(conTup = systable_getnext(scan)))
    1058             :         {
    1059        1464 :             Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
    1060             :             Datum       val;
    1061             :             bool        isNull;
    1062             :             char       *constring;
    1063             :             Expr       *check_expr;
    1064             :             DomainConstraintState *r;
    1065             : 
    1066             :             /* Ignore non-CHECK constraints (presently, shouldn't be any) */
    1067        1464 :             if (c->contype != CONSTRAINT_CHECK)
    1068           0 :                 continue;
    1069             : 
    1070             :             /* Not expecting conbin to be NULL, but we'll test for it anyway */
    1071        1464 :             val = fastgetattr(conTup, Anum_pg_constraint_conbin,
    1072             :                               conRel->rd_att, &isNull);
    1073        1464 :             if (isNull)
    1074           0 :                 elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
    1075             :                      NameStr(typTup->typname), NameStr(c->conname));
    1076             : 
    1077             :             /* Convert conbin to C string in caller context */
    1078        1464 :             constring = TextDatumGetCString(val);
    1079             : 
    1080             :             /* Create the DomainConstraintCache object and context if needed */
    1081        1464 :             if (dcc == NULL)
    1082             :             {
    1083             :                 MemoryContext cxt;
    1084             : 
    1085        1446 :                 cxt = AllocSetContextCreate(CurrentMemoryContext,
    1086             :                                             "Domain constraints",
    1087             :                                             ALLOCSET_SMALL_SIZES);
    1088             :                 dcc = (DomainConstraintCache *)
    1089        1446 :                     MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
    1090        1446 :                 dcc->constraints = NIL;
    1091        1446 :                 dcc->dccContext = cxt;
    1092        1446 :                 dcc->dccRefCount = 0;
    1093             :             }
    1094             : 
    1095             :             /* Create node trees in DomainConstraintCache's context */
    1096        1464 :             oldcxt = MemoryContextSwitchTo(dcc->dccContext);
    1097             : 
    1098        1464 :             check_expr = (Expr *) stringToNode(constring);
    1099             : 
    1100             :             /*
    1101             :              * Plan the expression, since ExecInitExpr will expect that.
    1102             :              *
    1103             :              * Note: caching the result of expression_planner() is not very
    1104             :              * good practice.  Ideally we'd use a CachedExpression here so
    1105             :              * that we would react promptly to, eg, changes in inlined
    1106             :              * functions.  However, because we don't support mutable domain
    1107             :              * CHECK constraints, it's not really clear that it's worth the
    1108             :              * extra overhead to do that.
    1109             :              */
    1110        1464 :             check_expr = expression_planner(check_expr);
    1111             : 
    1112        1464 :             r = makeNode(DomainConstraintState);
    1113        1464 :             r->constrainttype = DOM_CONSTRAINT_CHECK;
    1114        1464 :             r->name = pstrdup(NameStr(c->conname));
    1115        1464 :             r->check_expr = check_expr;
    1116        1464 :             r->check_exprstate = NULL;
    1117             : 
    1118        1464 :             MemoryContextSwitchTo(oldcxt);
    1119             : 
    1120             :             /* Accumulate constraints in an array, for sorting below */
    1121        1464 :             if (ccons == NULL)
    1122             :             {
    1123        1446 :                 cconslen = 8;
    1124             :                 ccons = (DomainConstraintState **)
    1125        1446 :                     palloc(cconslen * sizeof(DomainConstraintState *));
    1126             :             }
    1127          18 :             else if (nccons >= cconslen)
    1128             :             {
    1129           0 :                 cconslen *= 2;
    1130             :                 ccons = (DomainConstraintState **)
    1131           0 :                     repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
    1132             :             }
    1133        1464 :             ccons[nccons++] = r;
    1134             :         }
    1135             : 
    1136        3074 :         systable_endscan(scan);
    1137             : 
    1138        3074 :         if (nccons > 0)
    1139             :         {
    1140             :             /*
    1141             :              * Sort the items for this domain, so that CHECKs are applied in a
    1142             :              * deterministic order.
    1143             :              */
    1144        1456 :             if (nccons > 1)
    1145           6 :                 qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);
    1146             : 
    1147             :             /*
    1148             :              * Now attach them to the overall list.  Use lcons() here because
    1149             :              * constraints of parent domains should be applied earlier.
    1150             :              */
    1151        1456 :             oldcxt = MemoryContextSwitchTo(dcc->dccContext);
    1152        2920 :             while (nccons > 0)
    1153        1464 :                 dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
    1154        1456 :             MemoryContextSwitchTo(oldcxt);
    1155             :         }
    1156             : 
    1157             :         /* loop to next domain in stack */
    1158        3074 :         typeOid = typTup->typbasetype;
    1159        3074 :         ReleaseSysCache(tup);
    1160             :     }
    1161             : 
    1162        3054 :     table_close(conRel, AccessShareLock);
    1163             : 
    1164             :     /*
    1165             :      * Only need to add one NOT NULL check regardless of how many domains in
    1166             :      * the stack request it.
    1167             :      */
    1168        3054 :     if (notNull)
    1169             :     {
    1170             :         DomainConstraintState *r;
    1171             : 
    1172             :         /* Create the DomainConstraintCache object and context if needed */
    1173          54 :         if (dcc == NULL)
    1174             :         {
    1175             :             MemoryContext cxt;
    1176             : 
    1177          46 :             cxt = AllocSetContextCreate(CurrentMemoryContext,
    1178             :                                         "Domain constraints",
    1179             :                                         ALLOCSET_SMALL_SIZES);
    1180             :             dcc = (DomainConstraintCache *)
    1181          46 :                 MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
    1182          46 :             dcc->constraints = NIL;
    1183          46 :             dcc->dccContext = cxt;
    1184          46 :             dcc->dccRefCount = 0;
    1185             :         }
    1186             : 
    1187             :         /* Create node trees in DomainConstraintCache's context */
    1188          54 :         oldcxt = MemoryContextSwitchTo(dcc->dccContext);
    1189             : 
    1190          54 :         r = makeNode(DomainConstraintState);
    1191             : 
    1192          54 :         r->constrainttype = DOM_CONSTRAINT_NOTNULL;
    1193          54 :         r->name = pstrdup("NOT NULL");
    1194          54 :         r->check_expr = NULL;
    1195          54 :         r->check_exprstate = NULL;
    1196             : 
    1197             :         /* lcons to apply the nullness check FIRST */
    1198          54 :         dcc->constraints = lcons(r, dcc->constraints);
    1199             : 
    1200          54 :         MemoryContextSwitchTo(oldcxt);
    1201             :     }
    1202             : 
    1203             :     /*
    1204             :      * If we made a constraint object, move it into CacheMemoryContext and
    1205             :      * attach it to the typcache entry.
    1206             :      */
    1207        3054 :     if (dcc)
    1208             :     {
    1209        1492 :         MemoryContextSetParent(dcc->dccContext, CacheMemoryContext);
    1210        1492 :         typentry->domainData = dcc;
    1211        1492 :         dcc->dccRefCount++;      /* count the typcache's reference */
    1212             :     }
    1213             : 
    1214             :     /* Either way, the typcache entry's domain data is now valid. */
    1215        3054 :     typentry->flags |= TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
    1216        3054 : }
    1217             : 
    1218             : /*
    1219             :  * qsort comparator to sort DomainConstraintState pointers by name
    1220             :  */
    1221             : static int
    1222           8 : dcs_cmp(const void *a, const void *b)
    1223             : {
    1224           8 :     const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
    1225           8 :     const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
    1226             : 
    1227           8 :     return strcmp((*ca)->name, (*cb)->name);
    1228             : }
    1229             : 
    1230             : /*
    1231             :  * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
    1232             :  * and free it if no references remain
    1233             :  */
    1234             : static void
    1235       37394 : decr_dcc_refcount(DomainConstraintCache *dcc)
    1236             : {
    1237             :     Assert(dcc->dccRefCount > 0);
    1238       37394 :     if (--(dcc->dccRefCount) <= 0)
    1239         290 :         MemoryContextDelete(dcc->dccContext);
    1240       37394 : }
    1241             : 
    1242             : /*
    1243             :  * Context reset/delete callback for a DomainConstraintRef
    1244             :  */
    1245             : static void
    1246       39654 : dccref_deletion_callback(void *arg)
    1247             : {
    1248       39654 :     DomainConstraintRef *ref = (DomainConstraintRef *) arg;
    1249       39654 :     DomainConstraintCache *dcc = ref->dcc;
    1250             : 
    1251             :     /* Paranoia --- be sure link is nulled before trying to release */
    1252       39654 :     if (dcc)
    1253             :     {
    1254       37100 :         ref->constraints = NIL;
    1255       37100 :         ref->dcc = NULL;
    1256       37100 :         decr_dcc_refcount(dcc);
    1257             :     }
    1258       39654 : }
    1259             : 
    1260             : /*
    1261             :  * prep_domain_constraints --- prepare domain constraints for execution
    1262             :  *
    1263             :  * The expression trees stored in the DomainConstraintCache's list are
    1264             :  * converted to executable expression state trees stored in execctx.
    1265             :  */
    1266             : static List *
    1267        2020 : prep_domain_constraints(List *constraints, MemoryContext execctx)
    1268             : {
    1269        2020 :     List       *result = NIL;
    1270             :     MemoryContext oldcxt;
    1271             :     ListCell   *lc;
    1272             : 
    1273        2020 :     oldcxt = MemoryContextSwitchTo(execctx);
    1274             : 
    1275        4056 :     foreach(lc, constraints)
    1276             :     {
    1277        2036 :         DomainConstraintState *r = (DomainConstraintState *) lfirst(lc);
    1278             :         DomainConstraintState *newr;
    1279             : 
    1280        2036 :         newr = makeNode(DomainConstraintState);
    1281        2036 :         newr->constrainttype = r->constrainttype;
    1282        2036 :         newr->name = r->name;
    1283        2036 :         newr->check_expr = r->check_expr;
    1284        2036 :         newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
    1285             : 
    1286        2036 :         result = lappend(result, newr);
    1287             :     }
    1288             : 
    1289        2020 :     MemoryContextSwitchTo(oldcxt);
    1290             : 
    1291        2020 :     return result;
    1292             : }
    1293             : 
    1294             : /*
    1295             :  * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
    1296             :  *
    1297             :  * Caller must tell us the MemoryContext in which the DomainConstraintRef
    1298             :  * lives.  The ref will be cleaned up when that context is reset/deleted.
    1299             :  *
    1300             :  * Caller must also tell us whether it wants check_exprstate fields to be
    1301             :  * computed in the DomainConstraintState nodes attached to this ref.
    1302             :  * If it doesn't, we need not make a copy of the DomainConstraintState list.
    1303             :  */
    1304             : void
    1305       39682 : InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref,
    1306             :                         MemoryContext refctx, bool need_exprstate)
    1307             : {
    1308             :     /* Look up the typcache entry --- we assume it survives indefinitely */
    1309       39682 :     ref->tcache = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
    1310       39682 :     ref->need_exprstate = need_exprstate;
    1311             :     /* For safety, establish the callback before acquiring a refcount */
    1312       39682 :     ref->refctx = refctx;
    1313       39682 :     ref->dcc = NULL;
    1314       39682 :     ref->callback.func = dccref_deletion_callback;
    1315       39682 :     ref->callback.arg = (void *) ref;
    1316       39682 :     MemoryContextRegisterResetCallback(refctx, &ref->callback);
    1317             :     /* Acquire refcount if there are constraints, and set up exported list */
    1318       39682 :     if (ref->tcache->domainData)
    1319             :     {
    1320       37128 :         ref->dcc = ref->tcache->domainData;
    1321       37128 :         ref->dcc->dccRefCount++;
    1322       37128 :         if (ref->need_exprstate)
    1323        2020 :             ref->constraints = prep_domain_constraints(ref->dcc->constraints,
    1324             :                                                        ref->refctx);
    1325             :         else
    1326       35108 :             ref->constraints = ref->dcc->constraints;
    1327             :     }
    1328             :     else
    1329        2554 :         ref->constraints = NIL;
    1330       39682 : }
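
/*
 * Illustrative usage sketch (hypothetical caller, not part of typcache.c):
 * one-time setup in a long-lived per-call-site context, in the spirit of
 * the domain input functions in utils/adt/domains.c.  "MyDomainExtra" and
 * "setup_domain_checks" are made-up names; passing need_exprstate = true
 * requests executable check_exprstate trees built in the same context.
 */
#include "postgres.h"
#include "utils/typcache.h"

typedef struct MyDomainExtra
{
    DomainConstraintRef constraint_ref;
    /* ... any other per-call-site state ... */
} MyDomainExtra;

static MyDomainExtra *
setup_domain_checks(Oid domainType, MemoryContext mcxt)
{
    MyDomainExtra *extra;

    extra = (MyDomainExtra *) MemoryContextAlloc(mcxt, sizeof(MyDomainExtra));
    InitDomainConstraintRef(domainType, &extra->constraint_ref, mcxt, true);
    return extra;
}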
    1331             : 
    1332             : /*
    1333             :  * UpdateDomainConstraintRef --- recheck validity of domain constraint info
    1334             :  *
    1335             :  * If the domain's constraint set changed, ref->constraints is updated to
    1336             :  * point at a new list of cached constraints.
    1337             :  *
    1338             :  * In the normal case where nothing happened to the domain, this is cheap
    1339             :  * enough that it's reasonable (and expected) to check before *each* use
    1340             :  * of the constraint info.
    1341             :  */
    1342             : void
    1343     2064314 : UpdateDomainConstraintRef(DomainConstraintRef *ref)
    1344             : {
    1345     2064314 :     TypeCacheEntry *typentry = ref->tcache;
    1346             : 
    1347             :     /* Make sure typcache entry's data is up to date */
    1348     2064314 :     if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
    1349           0 :         typentry->typtype == TYPTYPE_DOMAIN)
    1350           0 :         load_domaintype_info(typentry);
    1351             : 
    1352             :     /* Transfer to ref object if there's new info, adjusting refcounts */
    1353     2064314 :     if (ref->dcc != typentry->domainData)
    1354             :     {
    1355             :         /* Paranoia --- be sure link is nulled before trying to release */
    1356           0 :         DomainConstraintCache *dcc = ref->dcc;
    1357             : 
    1358           0 :         if (dcc)
    1359             :         {
    1360             :             /*
    1361             :              * Note: we just leak the previous list of executable domain
    1362             :              * constraints.  Alternatively, we could keep those in a child
    1363             :              * context of ref->refctx and free that context at this point.
    1364             :              * However, in practice this code path will be taken so seldom
    1365             :              * that the extra bookkeeping for a child context doesn't seem
    1366             :              * worthwhile; we'll just allow a leak for the lifespan of refctx.
    1367             :              */
    1368           0 :             ref->constraints = NIL;
    1369           0 :             ref->dcc = NULL;
    1370           0 :             decr_dcc_refcount(dcc);
    1371             :         }
    1372           0 :         dcc = typentry->domainData;
    1373           0 :         if (dcc)
    1374             :         {
    1375           0 :             ref->dcc = dcc;
    1376           0 :             dcc->dccRefCount++;
    1377           0 :             if (ref->need_exprstate)
    1378           0 :                 ref->constraints = prep_domain_constraints(dcc->constraints,
    1379             :                                                            ref->refctx);
    1380             :             else
    1381           0 :                 ref->constraints = dcc->constraints;
    1382             :         }
    1383             :     }
    1384     2064314 : }
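
/*
 * Illustrative usage sketch (hypothetical helper, not part of typcache.c):
 * per-value use of a ref initialized as above.  The ref is revalidated
 * first, which is cheap when nothing changed.  Only the NOT NULL case is
 * shown; DOM_CONSTRAINT_CHECK entries would additionally be evaluated via
 * con->check_exprstate in an expression context, as domain_check() in
 * utils/adt/domains.c does.  The function name is made up.
 */
#include "postgres.h"
#include "nodes/execnodes.h"
#include "nodes/pg_list.h"
#include "utils/typcache.h"

static void
reject_null_if_domain_forbids(DomainConstraintRef *ref, bool isnull)
{
    ListCell   *lc;

    UpdateDomainConstraintRef(ref);

    foreach(lc, ref->constraints)
    {
        DomainConstraintState *con = (DomainConstraintState *) lfirst(lc);

        if (con->constrainttype == DOM_CONSTRAINT_NOTNULL && isnull)
            ereport(ERROR,
                    (errcode(ERRCODE_NOT_NULL_VIOLATION),
                     errmsg("domain does not allow null values")));
    }
}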
    1385             : 
    1386             : /*
    1387             :  * DomainHasConstraints --- utility routine to check if a domain has constraints
    1388             :  *
    1389             :  * This is defined to return false, not fail, if type is not a domain.
    1390             :  */
    1391             : bool
    1392      106044 : DomainHasConstraints(Oid type_id)
    1393             : {
    1394             :     TypeCacheEntry *typentry;
    1395             : 
    1396             :     /*
    1397             :      * Note: a side effect is to cause the typcache's domain data to become
    1398             :      * valid.  This is fine since we'll likely need it soon if there is any.
    1399             :      */
    1400      106044 :     typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
    1401             : 
    1402      106044 :     return (typentry->domainData != NULL);
    1403             : }
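
/*
 * Illustrative usage sketch (hypothetical snippet, not part of typcache.c):
 * callers typically treat this as a cheap pre-check so they can skip
 * constraint-checking machinery when there is nothing to enforce.  The
 * wrapper name is made up.
 */
#include "postgres.h"
#include "utils/typcache.h"

static bool
value_needs_domain_checking(Oid typid)
{
    /* false both for plain types and for unconstrained domains */
    return DomainHasConstraints(typid);
}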
    1404             : 
    1405             : 
    1406             : /*
    1407             :  * array_element_has_equality and friends are helper routines to check
    1408             :  * whether we should believe that array_eq and related functions will work
    1409             :  * on the given array type or composite type.
    1410             :  *
    1411             :  * The logic above may call these repeatedly on the same type entry, so we
    1412             :  * make use of the typentry->flags field to cache the results once known.
    1413             :  * Also, we assume that we'll probably want all these facts about the type
    1414             :  * if we want any, so we cache them all using only one lookup of the
    1415             :  * component datatype(s).
    1416             :  */
    1417             : 
    1418             : static bool
    1419        4892 : array_element_has_equality(TypeCacheEntry *typentry)
    1420             : {
    1421        4892 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1422        4872 :         cache_array_element_properties(typentry);
    1423        4892 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
    1424             : }
    1425             : 
    1426             : static bool
    1427        9750 : array_element_has_compare(TypeCacheEntry *typentry)
    1428             : {
    1429        9750 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1430          56 :         cache_array_element_properties(typentry);
    1431        9750 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
    1432             : }
    1433             : 
    1434             : static bool
    1435        1128 : array_element_has_hashing(TypeCacheEntry *typentry)
    1436             : {
    1437        1128 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1438           0 :         cache_array_element_properties(typentry);
    1439        1128 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
    1440             : }
    1441             : 
    1442             : static bool
    1443        1060 : array_element_has_extended_hashing(TypeCacheEntry *typentry)
    1444             : {
    1445        1060 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1446           0 :         cache_array_element_properties(typentry);
    1447        1060 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
    1448             : }
    1449             : 
    1450             : static void
    1451        4928 : cache_array_element_properties(TypeCacheEntry *typentry)
    1452             : {
    1453        4928 :     Oid         elem_type = get_base_element_type(typentry->type_id);
    1454             : 
    1455        4928 :     if (OidIsValid(elem_type))
    1456             :     {
    1457             :         TypeCacheEntry *elementry;
    1458             : 
    1459        4394 :         elementry = lookup_type_cache(elem_type,
    1460             :                                       TYPECACHE_EQ_OPR |
    1461             :                                       TYPECACHE_CMP_PROC |
    1462             :                                       TYPECACHE_HASH_PROC |
    1463             :                                       TYPECACHE_HASH_EXTENDED_PROC);
    1464        4394 :         if (OidIsValid(elementry->eq_opr))
    1465        3870 :             typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
    1466        4394 :         if (OidIsValid(elementry->cmp_proc))
    1467        3334 :             typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
    1468        4394 :         if (OidIsValid(elementry->hash_proc))
    1469        3862 :             typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
    1470        4394 :         if (OidIsValid(elementry->hash_extended_proc))
    1471        3862 :             typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
    1472             :     }
    1473        4928 :     typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
    1474        4928 : }
    1475             : 
    1476             : /*
    1477             :  * Likewise, some helper functions for composite types.
    1478             :  */
    1479             : 
    1480             : static bool
    1481         658 : record_fields_have_equality(TypeCacheEntry *typentry)
    1482             : {
    1483         658 :     if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
    1484         642 :         cache_record_field_properties(typentry);
    1485         658 :     return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
    1486             : }
    1487             : 
    1488             : static bool
    1489         714 : record_fields_have_compare(TypeCacheEntry *typentry)
    1490             : {
    1491         714 :     if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
    1492          38 :         cache_record_field_properties(typentry);
    1493         714 :     return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
    1494             : }
    1495             : 
    1496             : static bool
    1497         650 : record_fields_have_hashing(TypeCacheEntry *typentry)
    1498             : {
    1499         650 :     if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
    1500           0 :         cache_record_field_properties(typentry);
    1501         650 :     return (typentry->flags & TCFLAGS_HAVE_FIELD_HASHING) != 0;
    1502             : }
    1503             : 
    1504             : static bool
    1505         536 : record_fields_have_extended_hashing(TypeCacheEntry *typentry)
    1506             : {
    1507         536 :     if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
    1508           0 :         cache_record_field_properties(typentry);
    1509         536 :     return (typentry->flags & TCFLAGS_HAVE_FIELD_EXTENDED_HASHING) != 0;
    1510             : }
    1511             : 
    1512             : static void
    1513         680 : cache_record_field_properties(TypeCacheEntry *typentry)
    1514             : {
    1515             :     /*
    1516             :      * For type RECORD, we can't really tell what will work, since we don't
    1517             :      * have access here to the specific anonymous type.  Just assume that
    1518             :      * equality and comparison will (we may get a failure at runtime).  We
    1519             :      * could also claim that hashing works, but then if code that has the
    1520             :      * option between a comparison-based (sort-based) and a hash-based plan
    1521             :      * chooses hashing, stuff could fail that would otherwise work if it chose
    1522             :      * a comparison-based plan.  In practice more types support comparison
    1523             :      * than hashing.
    1524             :      */
    1525         680 :     if (typentry->type_id == RECORDOID)
    1526             :     {
    1527          20 :         typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
    1528             :                             TCFLAGS_HAVE_FIELD_COMPARE);
    1529             :     }
    1530         660 :     else if (typentry->typtype == TYPTYPE_COMPOSITE)
    1531             :     {
    1532             :         TupleDesc   tupdesc;
    1533             :         int         newflags;
    1534             :         int         i;
    1535             : 
    1536             :         /* Fetch composite type's tupdesc if we don't have it already */
    1537         660 :         if (typentry->tupDesc == NULL)
    1538         120 :             load_typcache_tupdesc(typentry);
    1539         660 :         tupdesc = typentry->tupDesc;
    1540             : 
    1541             :         /* Must bump the refcount while we do additional catalog lookups */
    1542         660 :         IncrTupleDescRefCount(tupdesc);
    1543             : 
    1544             :         /* Have each property if all non-dropped fields have the property */
    1545         660 :         newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
    1546             :                     TCFLAGS_HAVE_FIELD_COMPARE |
    1547             :                     TCFLAGS_HAVE_FIELD_HASHING |
    1548             :                     TCFLAGS_HAVE_FIELD_EXTENDED_HASHING);
    1549       14606 :         for (i = 0; i < tupdesc->natts; i++)
    1550             :         {
    1551             :             TypeCacheEntry *fieldentry;
    1552       14482 :             Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
    1553             : 
    1554       14482 :             if (attr->attisdropped)
    1555           0 :                 continue;
    1556             : 
    1557       14482 :             fieldentry = lookup_type_cache(attr->atttypid,
    1558             :                                            TYPECACHE_EQ_OPR |
    1559             :                                            TYPECACHE_CMP_PROC |
    1560             :                                            TYPECACHE_HASH_PROC |
    1561             :                                            TYPECACHE_HASH_EXTENDED_PROC);
    1562       14482 :             if (!OidIsValid(fieldentry->eq_opr))
    1563         536 :                 newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
    1564       14482 :             if (!OidIsValid(fieldentry->cmp_proc))
    1565         536 :                 newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
    1566       14482 :             if (!OidIsValid(fieldentry->hash_proc))
    1567         540 :                 newflags &= ~TCFLAGS_HAVE_FIELD_HASHING;
    1568       14482 :             if (!OidIsValid(fieldentry->hash_extended_proc))
    1569         540 :                 newflags &= ~TCFLAGS_HAVE_FIELD_EXTENDED_HASHING;
    1570             : 
    1571             :             /* We can drop out of the loop once we disprove all bits */
    1572       14482 :             if (newflags == 0)
    1573         536 :                 break;
    1574             :         }
    1575         660 :         typentry->flags |= newflags;
    1576             : 
    1577         660 :         DecrTupleDescRefCount(tupdesc);
    1578             :     }
    1579           0 :     else if (typentry->typtype == TYPTYPE_DOMAIN)
    1580             :     {
    1581             :         /* If it's domain over composite, copy base type's properties */
    1582             :         TypeCacheEntry *baseentry;
    1583             : 
    1584             :         /* load up basetype info if we didn't already */
    1585           0 :         if (typentry->domainBaseType == InvalidOid)
    1586             :         {
    1587           0 :             typentry->domainBaseTypmod = -1;
    1588           0 :             typentry->domainBaseType =
    1589           0 :                 getBaseTypeAndTypmod(typentry->type_id,
    1590             :                                      &typentry->domainBaseTypmod);
    1591             :         }
    1592           0 :         baseentry = lookup_type_cache(typentry->domainBaseType,
    1593             :                                       TYPECACHE_EQ_OPR |
    1594             :                                       TYPECACHE_CMP_PROC |
    1595             :                                       TYPECACHE_HASH_PROC |
    1596             :                                       TYPECACHE_HASH_EXTENDED_PROC);
    1597           0 :         if (baseentry->typtype == TYPTYPE_COMPOSITE)
    1598             :         {
    1599           0 :             typentry->flags |= TCFLAGS_DOMAIN_BASE_IS_COMPOSITE;
    1600           0 :             typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
    1601             :                                                    TCFLAGS_HAVE_FIELD_COMPARE |
    1602             :                                                    TCFLAGS_HAVE_FIELD_HASHING |
    1603             :                                                    TCFLAGS_HAVE_FIELD_EXTENDED_HASHING);
    1604             :         }
    1605             :     }
    1606         680 :     typentry->flags |= TCFLAGS_CHECKED_FIELD_PROPERTIES;
    1607         680 : }
    1608             : 
    1609             : /*
    1610             :  * Likewise, some helper functions for range and multirange types.
    1611             :  *
    1612             :  * We can borrow the flag bits for array element properties to use for range
    1613             :  * element properties, since those flag bits otherwise have no use in a
    1614             :  * range or multirange type's typcache entry.
    1615             :  */
    1616             : 
    1617             : static bool
    1618          12 : range_element_has_hashing(TypeCacheEntry *typentry)
    1619             : {
    1620          12 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1621          12 :         cache_range_element_properties(typentry);
    1622          12 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
    1623             : }
    1624             : 
    1625             : static bool
    1626           0 : range_element_has_extended_hashing(TypeCacheEntry *typentry)
    1627             : {
    1628           0 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1629           0 :         cache_range_element_properties(typentry);
    1630           0 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
    1631             : }
    1632             : 
    1633             : static void
    1634          12 : cache_range_element_properties(TypeCacheEntry *typentry)
    1635             : {
    1636             :     /* load up subtype link if we didn't already */
    1637          12 :     if (typentry->rngelemtype == NULL &&
    1638           0 :         typentry->typtype == TYPTYPE_RANGE)
    1639           0 :         load_rangetype_info(typentry);
    1640             : 
    1641          12 :     if (typentry->rngelemtype != NULL)
    1642             :     {
    1643             :         TypeCacheEntry *elementry;
    1644             : 
    1645             :         /* might need to calculate subtype's hash function properties */
    1646          12 :         elementry = lookup_type_cache(typentry->rngelemtype->type_id,
    1647             :                                       TYPECACHE_HASH_PROC |
    1648             :                                       TYPECACHE_HASH_EXTENDED_PROC);
    1649          12 :         if (OidIsValid(elementry->hash_proc))
    1650           8 :             typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
    1651          12 :         if (OidIsValid(elementry->hash_extended_proc))
    1652           8 :             typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
    1653             :     }
    1654          12 :     typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
    1655          12 : }
    1656             : 
    1657             : static bool
    1658           8 : multirange_element_has_hashing(TypeCacheEntry *typentry)
    1659             : {
    1660           8 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1661           8 :         cache_multirange_element_properties(typentry);
    1662           8 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
    1663             : }
    1664             : 
    1665             : static bool
    1666           0 : multirange_element_has_extended_hashing(TypeCacheEntry *typentry)
    1667             : {
    1668           0 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1669           0 :         cache_multirange_element_properties(typentry);
    1670           0 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
    1671             : }
    1672             : 
    1673             : static void
    1674           8 : cache_multirange_element_properties(TypeCacheEntry *typentry)
    1675             : {
    1676             :     /* load up range link if we didn't already */
    1677           8 :     if (typentry->rngtype == NULL &&
    1678           0 :         typentry->typtype == TYPTYPE_MULTIRANGE)
    1679           0 :         load_multirangetype_info(typentry);
    1680             : 
    1681           8 :     if (typentry->rngtype != NULL && typentry->rngtype->rngelemtype != NULL)
    1682             :     {
    1683             :         TypeCacheEntry *elementry;
    1684             : 
    1685             :         /* might need to calculate subtype's hash function properties */
    1686           8 :         elementry = lookup_type_cache(typentry->rngtype->rngelemtype->type_id,
    1687             :                                       TYPECACHE_HASH_PROC |
    1688             :                                       TYPECACHE_HASH_EXTENDED_PROC);
    1689           8 :         if (OidIsValid(elementry->hash_proc))
    1690           4 :             typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
    1691           8 :         if (OidIsValid(elementry->hash_extended_proc))
    1692           4 :             typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
    1693             :     }
    1694           8 :     typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
    1695           8 : }
    1696             : 
    1697             : /*
    1698             :  * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
    1699             :  * to store 'typmod'.
    1700             :  */
    1701             : static void
    1702       22020 : ensure_record_cache_typmod_slot_exists(int32 typmod)
    1703             : {
    1704       22020 :     if (RecordCacheArray == NULL)
    1705             :     {
    1706        3594 :         RecordCacheArray = (TupleDesc *)
    1707        3594 :             MemoryContextAllocZero(CacheMemoryContext, 64 * sizeof(TupleDesc));
    1708        3594 :         RecordIdentifierArray = (uint64 *)
    1709        3594 :             MemoryContextAllocZero(CacheMemoryContext, 64 * sizeof(uint64));
    1710        3594 :         RecordCacheArrayLen = 64;
    1711             :     }
    1712             : 
    1713       22020 :     if (typmod >= RecordCacheArrayLen)
    1714             :     {
    1715           0 :         int32       newlen = pg_nextpower2_32(typmod + 1);
    1716             : 
    1717           0 :         RecordCacheArray = (TupleDesc *) repalloc(RecordCacheArray,
    1718             :                                                   newlen * sizeof(TupleDesc));
    1719           0 :         memset(RecordCacheArray + RecordCacheArrayLen, 0,
    1720           0 :                (newlen - RecordCacheArrayLen) * sizeof(TupleDesc));
    1721           0 :         RecordIdentifierArray = (uint64 *) repalloc(RecordIdentifierArray,
    1722             :                                                     newlen * sizeof(uint64));
    1723           0 :         memset(RecordIdentifierArray + RecordCacheArrayLen, 0,
    1724           0 :                (newlen - RecordCacheArrayLen) * sizeof(uint64));
    1725           0 :         RecordCacheArrayLen = newlen;
    1726             :     }
    1727       22020 : }
    1728             : 
    1729             : /*
    1730             :  * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
    1731             :  *
    1732             :  * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
    1733             :  * hasn't had its refcount bumped.
    1734             :  */
    1735             : static TupleDesc
    1736      114638 : lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
    1737             : {
    1738      114638 :     if (type_id != RECORDOID)
    1739             :     {
    1740             :         /*
    1741             :          * It's a named composite type, so use the regular typcache.
    1742             :          */
    1743             :         TypeCacheEntry *typentry;
    1744             : 
    1745       76988 :         typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
    1746       76988 :         if (typentry->tupDesc == NULL && !noError)
    1747           0 :             ereport(ERROR,
    1748             :                     (errcode(ERRCODE_WRONG_OBJECT_TYPE),
    1749             :                      errmsg("type %s is not composite",
    1750             :                             format_type_be(type_id))));
    1751       76988 :         return typentry->tupDesc;
    1752             :     }
    1753             :     else
    1754             :     {
    1755             :         /*
    1756             :          * It's a transient record type, so look in our record-type table.
    1757             :          */
    1758       37650 :         if (typmod >= 0)
    1759             :         {
    1760             :             /* It is already in our local cache? */
    1761       37634 :             if (typmod < RecordCacheArrayLen &&
    1762       37630 :                 RecordCacheArray[typmod] != NULL)
    1763       37614 :                 return RecordCacheArray[typmod];
    1764             : 
    1765             :             /* Are we attached to a shared record typmod registry? */
    1766          20 :             if (CurrentSession->shared_typmod_registry != NULL)
    1767             :             {
    1768             :                 SharedTypmodTableEntry *entry;
    1769             : 
    1770             :                 /* Try to find it in the shared typmod index. */
    1771          20 :                 entry = dshash_find(CurrentSession->shared_typmod_table,
    1772             :                                     &typmod, false);
    1773          20 :                 if (entry != NULL)
    1774             :                 {
    1775             :                     TupleDesc   tupdesc;
    1776             : 
    1777             :                     tupdesc = (TupleDesc)
    1778          20 :                         dsa_get_address(CurrentSession->area,
    1779             :                                         entry->shared_tupdesc);
    1780             :                     Assert(typmod == tupdesc->tdtypmod);
    1781             : 
    1782             :                     /* We may need to extend the local RecordCacheArray. */
    1783          20 :                     ensure_record_cache_typmod_slot_exists(typmod);
    1784             : 
    1785             :                     /*
    1786             :                      * Our local array can now point directly to the TupleDesc
    1787             :                      * in shared memory, which is non-reference-counted.
    1788             :                      */
    1789          20 :                     RecordCacheArray[typmod] = tupdesc;
    1790             :                     Assert(tupdesc->tdrefcount == -1);
    1791             : 
    1792             :                     /*
    1793             :                      * We don't share tupdesc identifiers across processes, so
    1794             :                      * assign one locally.
    1795             :                      */
    1796          20 :                     RecordIdentifierArray[typmod] = ++tupledesc_id_counter;
    1797             : 
    1798          20 :                     dshash_release_lock(CurrentSession->shared_typmod_table,
    1799             :                                         entry);
    1800             : 
    1801          20 :                     return RecordCacheArray[typmod];
    1802             :                 }
    1803             :             }
    1804             :         }
    1805             : 
    1806          16 :         if (!noError)
    1807           0 :             ereport(ERROR,
    1808             :                     (errcode(ERRCODE_WRONG_OBJECT_TYPE),
    1809             :                      errmsg("record type has not been registered")));
    1810          16 :         return NULL;
    1811             :     }
    1812             : }
    1813             : 
    1814             : /*
    1815             :  * lookup_rowtype_tupdesc
    1816             :  *
    1817             :  * Given a typeid/typmod that should describe a known composite type,
    1818             :  * return the tuple descriptor for the type.  Will ereport on failure.
    1819             :  * (Use ereport because this is reachable with user-specified OIDs,
    1820             :  * for example from record_in().)
    1821             :  *
    1822             :  * Note: on success, we increment the refcount of the returned TupleDesc,
    1823             :  * and log the reference in CurrentResourceOwner.  Caller should call
    1824             :  * ReleaseTupleDesc or DecrTupleDescRefCount when done using the tupdesc.
    1825             :  */
    1826             : TupleDesc
    1827       48244 : lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
    1828             : {
    1829             :     TupleDesc   tupDesc;
    1830             : 
    1831       48244 :     tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
    1832       48244 :     PinTupleDesc(tupDesc);
    1833       48244 :     return tupDesc;
    1834             : }
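
/*
 * Illustrative usage sketch (hypothetical helper, not part of typcache.c):
 * the pin/release discipline described above.  When the descriptor must be
 * kept without reference counting, lookup_rowtype_tupdesc_copy() below
 * returns a private copy in CurrentMemoryContext instead.  The function
 * name is made up.
 */
#include "postgres.h"
#include "access/tupdesc.h"
#include "utils/typcache.h"

static int
count_rowtype_columns(Oid rowtypeid, int32 rowtypmod)
{
    TupleDesc   tupdesc = lookup_rowtype_tupdesc(rowtypeid, rowtypmod);
    int         natts = tupdesc->natts;

    ReleaseTupleDesc(tupdesc);  /* release the pin taken on our behalf */
    return natts;
}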
    1835             : 
    1836             : /*
    1837             :  * lookup_rowtype_tupdesc_noerror
    1838             :  *
    1839             :  * As above, but if the type is not a known composite type and noError
    1840             :  * is true, returns NULL instead of ereport'ing.  (Note that if a bogus
    1841             :  * type_id is passed, you'll get an ereport anyway.)
    1842             :  */
    1843             : TupleDesc
    1844           8 : lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
    1845             : {
    1846             :     TupleDesc   tupDesc;
    1847             : 
    1848           8 :     tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
    1849           8 :     if (tupDesc != NULL)
    1850           8 :         PinTupleDesc(tupDesc);
    1851           8 :     return tupDesc;
    1852             : }
    1853             : 
    1854             : /*
    1855             :  * lookup_rowtype_tupdesc_copy
    1856             :  *
    1857             :  * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
    1858             :  * copied into the CurrentMemoryContext and is not reference-counted.
    1859             :  */
    1860             : TupleDesc
    1861       66368 : lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
    1862             : {
    1863             :     TupleDesc   tmp;
    1864             : 
    1865       66368 :     tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
    1866       66368 :     return CreateTupleDescCopyConstr(tmp);
    1867             : }
    1868             : 
    1869             : /*
    1870             :  * lookup_rowtype_tupdesc_domain
    1871             :  *
    1872             :  * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
    1873             :  * a domain over a named composite type; so this is effectively equivalent to
    1874             :  * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
    1875             :  * except for being a tad faster.
    1876             :  *
    1877             :  * Note: the reason we don't fold the look-through-domain behavior into plain
    1878             :  * lookup_rowtype_tupdesc() is that we want callers to know they might be
    1879             :  * dealing with a domain.  Otherwise they might construct a tuple that should
    1880             :  * be of the domain type, but not apply domain constraints.
    1881             :  */
    1882             : TupleDesc
    1883        1150 : lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
    1884             : {
    1885             :     TupleDesc   tupDesc;
    1886             : 
    1887        1150 :     if (type_id != RECORDOID)
    1888             :     {
    1889             :         /*
    1890             :          * Check for domain or named composite type.  We might as well load
    1891             :          * whichever data is needed.
    1892             :          */
    1893             :         TypeCacheEntry *typentry;
    1894             : 
    1895        1132 :         typentry = lookup_type_cache(type_id,
    1896             :                                      TYPECACHE_TUPDESC |
    1897             :                                      TYPECACHE_DOMAIN_BASE_INFO);
    1898        1132 :         if (typentry->typtype == TYPTYPE_DOMAIN)
    1899           8 :             return lookup_rowtype_tupdesc_noerror(typentry->domainBaseType,
    1900             :                                                   typentry->domainBaseTypmod,
    1901             :                                                   noError);
    1902        1124 :         if (typentry->tupDesc == NULL && !noError)
    1903           0 :             ereport(ERROR,
    1904             :                     (errcode(ERRCODE_WRONG_OBJECT_TYPE),
    1905             :                      errmsg("type %s is not composite",
    1906             :                             format_type_be(type_id))));
    1907        1124 :         tupDesc = typentry->tupDesc;
    1908             :     }
    1909             :     else
    1910          18 :         tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
    1911        1142 :     if (tupDesc != NULL)
    1912        1126 :         PinTupleDesc(tupDesc);
    1913        1142 :     return tupDesc;
    1914             : }
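
/*
 * Illustrative sketch, not part of typcache.c proper: a caller that accepts
 * either a named composite type or a domain over one can use the function
 * above and still remember that a domain was involved, so that any tuple it
 * later builds gets domain constraints applied.  The function name and
 * is_domain flag are hypothetical; get_typtype() is from utils/lsyscache.h.
 * The returned descriptor is pinned and must eventually be released with
 * ReleaseTupleDesc.
 */
static TupleDesc
get_row_tupdesc_noting_domain(Oid arg_type_oid, bool *is_domain)
{
    *is_domain = (get_typtype(arg_type_oid) == TYPTYPE_DOMAIN);

    /* Look through the domain, if any; error out if not composite. */
    return lookup_rowtype_tupdesc_domain(arg_type_oid, -1, false);
}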
    1915             : 
    1916             : /*
    1917             :  * Hash function for the hash table of RecordCacheEntry.
    1918             :  */
    1919             : static uint32
    1920      251646 : record_type_typmod_hash(const void *data, size_t size)
    1921             : {
    1922      251646 :     RecordCacheEntry *entry = (RecordCacheEntry *) data;
    1923             : 
    1924      251646 :     return hashTupleDesc(entry->tupdesc);
    1925             : }
    1926             : 
    1927             : /*
    1928             :  * Match function for the hash table of RecordCacheEntry.
    1929             :  */
    1930             : static int
    1931      213610 : record_type_typmod_compare(const void *a, const void *b, size_t size)
    1932             : {
    1933      213610 :     RecordCacheEntry *left = (RecordCacheEntry *) a;
    1934      213610 :     RecordCacheEntry *right = (RecordCacheEntry *) b;
    1935             : 
    1936      213610 :     return equalTupleDescs(left->tupdesc, right->tupdesc) ? 0 : 1;
    1937             : }
    1938             : 
    1939             : /*
    1940             :  * assign_record_type_typmod
    1941             :  *
    1942             :  * Given a tuple descriptor for a RECORD type, find or create a cache entry
    1943             :  * for the type, and set the tupdesc's tdtypmod field to a value that will
    1944             :  * identify this cache entry to lookup_rowtype_tupdesc.
    1945             :  */
    1946             : void
    1947      229646 : assign_record_type_typmod(TupleDesc tupDesc)
    1948             : {
    1949             :     RecordCacheEntry *recentry;
    1950             :     TupleDesc   entDesc;
    1951             :     bool        found;
    1952             :     MemoryContext oldcxt;
    1953             : 
    1954             :     Assert(tupDesc->tdtypeid == RECORDOID);
    1955             : 
    1956      229646 :     if (RecordCacheHash == NULL)
    1957             :     {
    1958             :         /* First time through: initialize the hash table */
    1959             :         HASHCTL     ctl;
    1960             : 
    1961        3594 :         ctl.keysize = sizeof(TupleDesc);    /* just the pointer */
    1962        3594 :         ctl.entrysize = sizeof(RecordCacheEntry);
    1963        3594 :         ctl.hash = record_type_typmod_hash;
    1964        3594 :         ctl.match = record_type_typmod_compare;
    1965        3594 :         RecordCacheHash = hash_create("Record information cache", 64,
    1966             :                                       &ctl,
    1967             :                                       HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);
    1968             : 
    1969             :         /* Also make sure CacheMemoryContext exists */
    1970        3594 :         if (!CacheMemoryContext)
    1971           0 :             CreateCacheMemoryContext();
    1972             :     }
    1973             : 
    1974             :     /*
    1975             :      * Find a hashtable entry for this tuple descriptor. We don't use
    1976             :      * HASH_ENTER yet, because if it's missing, we need to make sure that all
    1977             :      * the allocations succeed before we create the new entry.
    1978             :      */
    1979      229646 :     recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
    1980             :                                                 (void *) &tupDesc,
    1981             :                                                 HASH_FIND, &found);
    1982      229646 :     if (found && recentry->tupdesc != NULL)
    1983             :     {
    1984      207646 :         tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
    1985      207646 :         return;
    1986             :     }
    1987             : 
    1988             :     /* Not present, so need to manufacture an entry */
    1989       22000 :     oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
    1990             : 
    1991             :     /* Look in the SharedRecordTypmodRegistry, if attached */
    1992       22000 :     entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
    1993       22000 :     if (entDesc == NULL)
    1994             :     {
    1995             :         /*
    1996             :          * Make sure we have room before we CreateTupleDescCopy() or advance
    1997             :          * NextRecordTypmod.
    1998             :          */
    1999       21964 :         ensure_record_cache_typmod_slot_exists(NextRecordTypmod);
    2000             : 
    2001             :         /* Reference-counted local cache only. */
    2002       21964 :         entDesc = CreateTupleDescCopy(tupDesc);
    2003       21964 :         entDesc->tdrefcount = 1;
    2004       21964 :         entDesc->tdtypmod = NextRecordTypmod++;
    2005             :     }
    2006             :     else
    2007             :     {
    2008          36 :         ensure_record_cache_typmod_slot_exists(entDesc->tdtypmod);
    2009             :     }
    2010             : 
    2011       22000 :     RecordCacheArray[entDesc->tdtypmod] = entDesc;
    2012             : 
    2013             :     /* Assign a unique tupdesc identifier, too. */
    2014       22000 :     RecordIdentifierArray[entDesc->tdtypmod] = ++tupledesc_id_counter;
    2015             : 
    2016             :     /* Fully initialized; create the hash table entry */
    2017       22000 :     recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
    2018             :                                                 (void *) &tupDesc,
    2019             :                                                 HASH_ENTER, NULL);
    2020       22000 :     recentry->tupdesc = entDesc;
    2021             : 
    2022             :     /* Update the caller's tuple descriptor. */
    2023       22000 :     tupDesc->tdtypmod = entDesc->tdtypmod;
    2024             : 
    2025       22000 :     MemoryContextSwitchTo(oldcxt);
    2026             : }
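
/*
 * Illustrative sketch, not part of typcache.c proper: building a transient
 * RECORD tuple descriptor and registering it with assign_record_type_typmod,
 * so that datums of this row type can later be resolved via
 * (RECORDOID, tupdesc->tdtypmod).  The column names and function name are
 * hypothetical; executor code normally reaches this via BlessTupleDesc().
 */
static TupleDesc
make_registered_record_desc(void)
{
    TupleDesc   tupdesc = CreateTemplateTupleDesc(2);

    TupleDescInitEntry(tupdesc, (AttrNumber) 1, "id", INT4OID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 2, "label", TEXTOID, -1, 0);

    /* Sets tupdesc->tdtypmod to a typmod known to lookup_rowtype_tupdesc. */
    assign_record_type_typmod(tupdesc);

    return tupdesc;
}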
    2027             : 
    2028             : /*
    2029             :  * assign_record_type_identifier
    2030             :  *
    2031             :  * Get an identifier, which will be unique over the lifespan of this backend
    2032             :  * process, for the current tuple descriptor of the specified composite type.
    2033             :  * For named composite types, the value is guaranteed to change if the type's
    2034             :  * definition does.  For registered RECORD types, the value will not change
    2035             :  * once assigned, since the registered type won't either.  If an anonymous
    2036             :  * RECORD type is specified, we return a new identifier on each call.
    2037             :  */
    2038             : uint64
    2039        3586 : assign_record_type_identifier(Oid type_id, int32 typmod)
    2040             : {
    2041        3586 :     if (type_id != RECORDOID)
    2042             :     {
    2043             :         /*
    2044             :          * It's a named composite type, so use the regular typcache.
    2045             :          */
    2046             :         TypeCacheEntry *typentry;
    2047             : 
    2048           0 :         typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
    2049           0 :         if (typentry->tupDesc == NULL)
    2050           0 :             ereport(ERROR,
    2051             :                     (errcode(ERRCODE_WRONG_OBJECT_TYPE),
    2052             :                      errmsg("type %s is not composite",
    2053             :                             format_type_be(type_id))));
    2054             :         Assert(typentry->tupDesc_identifier != 0);
    2055           0 :         return typentry->tupDesc_identifier;
    2056             :     }
    2057             :     else
    2058             :     {
    2059             :         /*
    2060             :          * It's a transient record type, so look in our record-type table.
    2061             :          */
    2062        3586 :         if (typmod >= 0 && typmod < RecordCacheArrayLen &&
    2063          50 :             RecordCacheArray[typmod] != NULL)
    2064             :         {
    2065             :             Assert(RecordIdentifierArray[typmod] != 0);
    2066          50 :             return RecordIdentifierArray[typmod];
    2067             :         }
    2068             : 
    2069             :         /* For anonymous or unrecognized record type, generate a new ID */
    2070        3536 :         return ++tupledesc_id_counter;
    2071             :     }
    2072             : }
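
/*
 * Illustrative sketch, not part of typcache.c proper: a caller holding
 * per-tupdesc derived state can remember the identifier returned above and
 * later check whether that state is still usable.  For anonymous RECORD
 * types a fresh identifier comes back on every call, so such state is never
 * treated as reusable.  The function name and cached_id are hypothetical.
 */
static bool
cached_tupdesc_state_still_valid(Oid type_id, int32 typmod, uint64 cached_id)
{
    return cached_id != 0 &&
        cached_id == assign_record_type_identifier(type_id, typmod);
}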
    2073             : 
    2074             : /*
    2075             :  * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
    2076             :  * This exists only to avoid exposing private innards of
    2077             :  * SharedRecordTypmodRegistry in a header.
    2078             :  */
    2079             : size_t
    2080          72 : SharedRecordTypmodRegistryEstimate(void)
    2081             : {
    2082          72 :     return sizeof(SharedRecordTypmodRegistry);
    2083             : }
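
/*
 * Illustrative sketch, not part of typcache.c proper: the estimate above is
 * typically folded into a shm_toc size estimate before creating the DSM
 * segment that will hold the registry (GetSessionDsmHandle in session.c does
 * this for the per-session segment, alongside space for the DSA area).  The
 * shm_toc_* estimator calls are from storage/shm_toc.h; the function name is
 * hypothetical.
 */
static Size
estimate_registry_toc_space(void)
{
    shm_toc_estimator estimator;

    shm_toc_initialize_estimator(&estimator);
    shm_toc_estimate_chunk(&estimator, SharedRecordTypmodRegistryEstimate());
    shm_toc_estimate_keys(&estimator, 1);
    return shm_toc_estimate(&estimator);
}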
    2084             : 
    2085             : /*
    2086             :  * Initialize 'registry' in a pre-existing shared memory region, which must be
    2087             :  * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
    2088             :  * bytes.
    2089             :  *
    2090             :  * 'area' will be used to allocate shared memory space as required for the
    2091             :  * typmod registration.  The current process, expected to be a leader process
    2092             :  * in a parallel query, will be attached automatically and its current record
    2093             :  * types will be loaded into *registry.  While attached, all calls to
    2094             :  * assign_record_type_typmod will use the shared registry.  Worker backends
    2095             :  * will need to attach explicitly.
    2096             :  *
    2097             :  * Note that this function takes 'area' and 'segment' as arguments rather than
    2098             :  * accessing them via CurrentSession, because they aren't installed there
    2099             :  * until after this function runs.
    2100             :  */
    2101             : void
    2102          72 : SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry,
    2103             :                                dsm_segment *segment,
    2104             :                                dsa_area *area)
    2105             : {
    2106             :     MemoryContext old_context;
    2107             :     dshash_table *record_table;
    2108             :     dshash_table *typmod_table;
    2109             :     int32       typmod;
    2110             : 
    2111             :     Assert(!IsParallelWorker());
    2112             : 
    2113             :     /* We can't already be attached to a shared registry. */
    2114             :     Assert(CurrentSession->shared_typmod_registry == NULL);
    2115             :     Assert(CurrentSession->shared_record_table == NULL);
    2116             :     Assert(CurrentSession->shared_typmod_table == NULL);
    2117             : 
    2118          72 :     old_context = MemoryContextSwitchTo(TopMemoryContext);
    2119             : 
    2120             :     /* Create the hash table of tuple descriptors indexed by themselves. */
    2121          72 :     record_table = dshash_create(area, &srtr_record_table_params, area);
    2122             : 
    2123             :     /* Create the hash table of tuple descriptors indexed by typmod. */
    2124          72 :     typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);
    2125             : 
    2126          72 :     MemoryContextSwitchTo(old_context);
    2127             : 
    2128             :     /* Initialize the SharedRecordTypmodRegistry. */
    2129          72 :     registry->record_table_handle = dshash_get_hash_table_handle(record_table);
    2130          72 :     registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
    2131          72 :     pg_atomic_init_u32(&registry->next_typmod, NextRecordTypmod);
    2132             : 
    2133             :     /*
    2134             :      * Copy all entries from this backend's private registry into the shared
    2135             :      * registry.
    2136             :      */
    2137          76 :     for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
    2138             :     {
    2139             :         SharedTypmodTableEntry *typmod_table_entry;
    2140             :         SharedRecordTableEntry *record_table_entry;
    2141             :         SharedRecordTableKey record_table_key;
    2142             :         dsa_pointer shared_dp;
    2143             :         TupleDesc   tupdesc;
    2144             :         bool        found;
    2145             : 
    2146           4 :         tupdesc = RecordCacheArray[typmod];
    2147           4 :         if (tupdesc == NULL)
    2148           0 :             continue;
    2149             : 
    2150             :         /* Copy the TupleDesc into shared memory. */
    2151           4 :         shared_dp = share_tupledesc(area, tupdesc, typmod);
    2152             : 
    2153             :         /* Insert into the typmod table. */
    2154           4 :         typmod_table_entry = dshash_find_or_insert(typmod_table,
    2155           4 :                                                    &tupdesc->tdtypmod,
    2156             :                                                    &found);
    2157           4 :         if (found)
    2158           0 :             elog(ERROR, "cannot create duplicate shared record typmod");
    2159           4 :         typmod_table_entry->typmod = tupdesc->tdtypmod;
    2160           4 :         typmod_table_entry->shared_tupdesc = shared_dp;
    2161           4 :         dshash_release_lock(typmod_table, typmod_table_entry);
    2162             : 
    2163             :         /* Insert into the record table. */
    2164           4 :         record_table_key.shared = false;
    2165           4 :         record_table_key.u.local_tupdesc = tupdesc;
    2166           4 :         record_table_entry = dshash_find_or_insert(record_table,
    2167             :                                                    &record_table_key,
    2168             :                                                    &found);
    2169           4 :         if (!found)
    2170             :         {
    2171           4 :             record_table_entry->key.shared = true;
    2172           4 :             record_table_entry->key.u.shared_tupdesc = shared_dp;
    2173             :         }
    2174           4 :         dshash_release_lock(record_table, record_table_entry);
    2175             :     }
    2176             : 
    2177             :     /*
    2178             :      * Set up the global state that will tell assign_record_type_typmod and
    2179             :      * lookup_rowtype_tupdesc_internal about the shared registry.
    2180             :      */
    2181          72 :     CurrentSession->shared_record_table = record_table;
    2182          72 :     CurrentSession->shared_typmod_table = typmod_table;
    2183          72 :     CurrentSession->shared_typmod_registry = registry;
    2184             : 
    2185             :     /*
    2186             :      * We install a detach hook in the leader, but only to handle cleanup on
    2187             :      * failure during GetSessionDsmHandle().  Once GetSessionDsmHandle() pins
    2188             :      * the memory, the leader process will use a shared registry until it
    2189             :      * exits.
    2190             :      */
    2191          72 :     on_dsm_detach(segment, shared_record_typmod_registry_detach, (Datum) 0);
    2192          72 : }
    2193             : 
    2194             : /*
    2195             :  * Attach to 'registry', which must have been initialized already by another
    2196             :  * backend.  Future calls to assign_record_type_typmod and
    2197             :  * lookup_rowtype_tupdesc_internal will use the shared registry until the
    2198             :  * current session is detached.
    2199             :  */
    2200             : void
    2201        1692 : SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
    2202             : {
    2203             :     MemoryContext old_context;
    2204             :     dshash_table *record_table;
    2205             :     dshash_table *typmod_table;
    2206             : 
    2207             :     Assert(IsParallelWorker());
    2208             : 
    2209             :     /* We can't already be attached to a shared registry. */
    2210             :     Assert(CurrentSession != NULL);
    2211             :     Assert(CurrentSession->segment != NULL);
    2212             :     Assert(CurrentSession->area != NULL);
    2213             :     Assert(CurrentSession->shared_typmod_registry == NULL);
    2214             :     Assert(CurrentSession->shared_record_table == NULL);
    2215             :     Assert(CurrentSession->shared_typmod_table == NULL);
    2216             : 
    2217             :     /*
    2218             :      * We can't already have typmods in our local cache, because they'd clash
    2219             :      * with those imported by SharedRecordTypmodRegistryInit.  This should be
    2220             :      * a freshly started parallel worker.  If we ever support worker
    2221             :      * recycling, a worker would need to zap its local cache in between
    2222             :      * servicing different queries, in order to be able to call this and
    2223             :      * synchronize typmods with a new leader; but that's problematic because
    2224             :      * we can't be very sure that record-typmod-related state hasn't escaped
    2225             :      * to anywhere else in the process.
    2226             :      */
    2227             :     Assert(NextRecordTypmod == 0);
    2228             : 
    2229        1692 :     old_context = MemoryContextSwitchTo(TopMemoryContext);
    2230             : 
    2231             :     /* Attach to the two hash tables. */
    2232        1692 :     record_table = dshash_attach(CurrentSession->area,
    2233             :                                  &srtr_record_table_params,
    2234             :                                  registry->record_table_handle,
    2235        1692 :                                  CurrentSession->area);
    2236        1692 :     typmod_table = dshash_attach(CurrentSession->area,
    2237             :                                  &srtr_typmod_table_params,
    2238             :                                  registry->typmod_table_handle,
    2239             :                                  NULL);
    2240             : 
    2241        1692 :     MemoryContextSwitchTo(old_context);
    2242             : 
    2243             :     /*
    2244             :      * Set up detach hook to run at worker exit.  Currently this is the same
    2245             :      * as the leader's detach hook, but in future they might need to be
    2246             :      * different.
    2247             :      */
    2248        1692 :     on_dsm_detach(CurrentSession->segment,
    2249             :                   shared_record_typmod_registry_detach,
    2250             :                   PointerGetDatum(registry));
    2251             : 
    2252             :     /*
    2253             :      * Set up the session state that will tell assign_record_type_typmod and
    2254             :      * lookup_rowtype_tupdesc_internal about the shared registry.
    2255             :      */
    2256        1692 :     CurrentSession->shared_typmod_registry = registry;
    2257        1692 :     CurrentSession->shared_record_table = record_table;
    2258        1692 :     CurrentSession->shared_typmod_table = typmod_table;
    2259        1692 : }
    2260             : 
    2261             : /*
    2262             :  * TypeCacheRelCallback
    2263             :  *      Relcache inval callback function
    2264             :  *
    2265             :  * Delete the cached tuple descriptor (if any) for the given rel's composite
    2266             :  * type, or for all composite types if relid == InvalidOid.  Also reset
    2267             :  * whatever info we have cached about the composite type's comparability.
    2268             :  *
    2269             :  * This is called when a relcache invalidation event occurs for the given
    2270             :  * relid.  We must scan the whole typcache hash since we don't know the
    2271             :  * type OID corresponding to the relid.  We could do a direct search if this
    2272             :  * were a syscache-flush callback on pg_type, but then we would need all
    2273             :  * ALTER-TABLE-like commands that could modify a rowtype to issue syscache
    2274             :  * invals against the rel's pg_type OID.  The extra SI signaling could very
    2275             :  * well cost more than we'd save, since in most usages there are not very
    2276             :  * many entries in a backend's typcache.  The risk of bugs-of-omission seems
    2277             :  * high, too.
    2278             :  *
    2279             :  * Another possibility, with only localized impact, is to maintain a second
    2280             :  * hashtable that indexes composite-type typcache entries by their typrelid.
    2281             :  * But it's still not clear it's worth the trouble.
    2282             :  */
    2283             : static void
    2284     1295368 : TypeCacheRelCallback(Datum arg, Oid relid)
    2285             : {
    2286             :     HASH_SEQ_STATUS status;
    2287             :     TypeCacheEntry *typentry;
    2288             : 
    2289             :     /* TypeCacheHash must exist, else this callback wouldn't be registered */
    2290     1295368 :     hash_seq_init(&status, TypeCacheHash);
    2291    16115746 :     while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
    2292             :     {
    2293    14820378 :         if (typentry->typtype == TYPTYPE_COMPOSITE)
    2294             :         {
    2295             :             /* Skip if no match, unless we're zapping all composite types */
    2296     3291320 :             if (relid != typentry->typrelid && relid != InvalidOid)
    2297     3280736 :                 continue;
    2298             : 
    2299             :             /* Delete tupdesc if we have it */
    2300       10584 :             if (typentry->tupDesc != NULL)
    2301             :             {
    2302             :                 /*
    2303             :                  * Release our refcount, and free the tupdesc if none remain.
    2304             :                  * (Can't use DecrTupleDescRefCount because this reference is
    2305             :                  * not logged in current resource owner.)
    2306             :                  */
    2307             :                 Assert(typentry->tupDesc->tdrefcount > 0);
    2308        3034 :                 if (--typentry->tupDesc->tdrefcount == 0)
    2309        1886 :                     FreeTupleDesc(typentry->tupDesc);
    2310        3034 :                 typentry->tupDesc = NULL;
    2311             : 
    2312             :                 /*
    2313             :                  * Also clear tupDesc_identifier, so that anything watching
    2314             :                  * that will realize that the tupdesc has possibly changed.
    2315             :                  * (Alternatively, we could specify that to detect possible
    2316             :                  * tupdesc change, one must check for tupDesc != NULL as well
    2317             :                  * as tupDesc_identifier being the same as what was previously
    2318             :                  * seen.  That seems error-prone.)
    2319             :                  */
    2320        3034 :                 typentry->tupDesc_identifier = 0;
    2321             :             }
    2322             : 
    2323             :             /* Reset equality/comparison/hashing validity information */
    2324       10584 :             typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
    2325             :         }
    2326    11529058 :         else if (typentry->typtype == TYPTYPE_DOMAIN)
    2327             :         {
    2328             :             /*
    2329             :              * If it's domain over composite, reset flags.  (We don't bother
    2330             :              * trying to determine whether the specific base type needs a
    2331             :              * reset.)  Note that if we haven't determined whether the base
    2332             :              * type is composite, we don't need to reset anything.
    2333             :              */
    2334     1389456 :             if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
    2335           0 :                 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
    2336             :         }
    2337             :     }
    2338     1295368 : }
    2339             : 
    2340             : /*
    2341             :  * TypeCacheTypCallback
    2342             :  *      Syscache inval callback function
    2343             :  *
    2344             :  * This is called when a syscache invalidation event occurs for any
    2345             :  * pg_type row.  If we have information cached about that type, mark
    2346             :  * it as needing to be reloaded.
    2347             :  */
    2348             : static void
    2349      567980 : TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
    2350             : {
    2351             :     HASH_SEQ_STATUS status;
    2352             :     TypeCacheEntry *typentry;
    2353             : 
    2354             :     /* TypeCacheHash must exist, else this callback wouldn't be registered */
    2355      567980 :     hash_seq_init(&status, TypeCacheHash);
    2356     5950686 :     while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
    2357             :     {
    2358             :         /* Is this the targeted type row (or it's a total cache flush)? */
    2359     5382706 :         if (hashvalue == 0 || typentry->type_id_hash == hashvalue)
    2360             :         {
    2361             :             /*
    2362             :              * Mark the data obtained directly from pg_type as invalid.  Also,
    2363             :              * if it's a domain, typnotnull might've changed, so we'll need to
    2364             :              * recalculate its constraints.
    2365             :              */
    2366        3586 :             typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
    2367             :                                  TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS);
    2368             :         }
    2369             :     }
    2370      567980 : }
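
/*
 * Illustrative sketch, not part of typcache.c proper: the hashvalue matched
 * above is the syscache hash of the type's OID, which lookup_type_cache
 * stores in type_id_hash when the entry is created, along these lines
 * (GetSysCacheHashValue1 is from utils/syscache.h):
 */
static uint32
type_oid_syscache_hash(Oid type_id)
{
    return GetSysCacheHashValue1(TYPEOID, ObjectIdGetDatum(type_id));
}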
    2371             : 
    2372             : /*
    2373             :  * TypeCacheOpcCallback
    2374             :  *      Syscache inval callback function
    2375             :  *
    2376             :  * This is called when a syscache invalidation event occurs for any pg_opclass
    2377             :  * row.  In principle we could probably just invalidate data dependent on the
    2378             :  * particular opclass, but since updates on pg_opclass are rare in production
    2379             :  * it doesn't seem worth a lot of complication: we just mark all cached data
    2380             :  * invalid.
    2381             :  *
    2382             :  * Note that we don't bother watching for updates on pg_amop or pg_amproc.
    2383             :  * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
    2384             :  * is not allowed to be used to add/drop the primary operators and functions
    2385             :  * of an opclass, only cross-type members of a family; and the latter sorts
    2386             :  * of members are not going to get cached here.
    2387             :  */
    2388             : static void
    2389         896 : TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
    2390             : {
    2391             :     HASH_SEQ_STATUS status;
    2392             :     TypeCacheEntry *typentry;
    2393             : 
    2394             :     /* TypeCacheHash must exist, else this callback wouldn't be registered */
    2395         896 :     hash_seq_init(&status, TypeCacheHash);
    2396        5184 :     while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
    2397             :     {
    2398             :         /* Reset equality/comparison/hashing validity information */
    2399        4288 :         typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
    2400             :     }
    2401         896 : }
    2402             : 
    2403             : /*
    2404             :  * TypeCacheConstrCallback
    2405             :  *      Syscache inval callback function
    2406             :  *
    2407             :  * This is called when a syscache invalidation event occurs for any
    2408             :  * pg_constraint row.  We flush information about domain constraints
    2409             :  * when this happens.
    2410             :  *
    2411             :  * It's slightly annoying that we can't tell whether the inval event was for
    2412             :  * a domain constraint record or not; there's usually more update traffic
    2413             :  * for table constraints than domain constraints, so we'll do a lot of
    2414             :  * useless flushes.  Still, this is better than the old no-caching-at-all
    2415             :  * approach to domain constraints.
    2416             :  */
    2417             : static void
    2418       48204 : TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
    2419             : {
    2420             :     TypeCacheEntry *typentry;
    2421             : 
    2422             :     /*
    2423             :      * Because this is called very frequently, and typically very few of the
    2424             :      * typcache entries are for domains, we don't use hash_seq_search here.
    2425             :      * Instead we thread all the domain-type entries together so that we can
    2426             :      * visit them cheaply.
    2427             :      */
    2428       85552 :     for (typentry = firstDomainTypeEntry;
    2429             :          typentry != NULL;
    2430       37348 :          typentry = typentry->nextDomain)
    2431             :     {
    2432             :         /* Reset domain constraint validity information */
    2433       37348 :         typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
    2434             :     }
    2435       48204 : }
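
/*
 * Illustrative sketch, not part of typcache.c proper: the four invalidation
 * callbacks above are registered once, on the first call to
 * lookup_type_cache, roughly as below.  CacheRegisterRelcacheCallback and
 * CacheRegisterSyscacheCallback are declared in utils/inval.h; the syscache
 * IDs come from utils/syscache.h.
 */
static void
register_typcache_invalidation_callbacks(void)
{
    CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
    CacheRegisterSyscacheCallback(TYPEOID, TypeCacheTypCallback, (Datum) 0);
    CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
    CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);
}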
    2436             : 
    2437             : 
    2438             : /*
    2439             :  * Check if given OID is part of the subset that's sortable by comparisons
    2440             :  */
    2441             : static inline bool
    2442      300080 : enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
    2443             : {
    2444             :     Oid         offset;
    2445             : 
    2446      300080 :     if (arg < enumdata->bitmap_base)
    2447           0 :         return false;
    2448      300080 :     offset = arg - enumdata->bitmap_base;
    2449      300080 :     if (offset > (Oid) INT_MAX)
    2450           0 :         return false;
    2451      300080 :     return bms_is_member((int) offset, enumdata->sorted_values);
    2452             : }
    2453             : 
    2454             : 
    2455             : /*
    2456             :  * compare_values_of_enum
    2457             :  *      Compare two members of an enum type.
    2458             :  *      Return <0, 0, or >0 according as arg1 <, =, or > arg2.
    2459             :  *
    2460             :  * Note: currently, the enumData cache is refreshed only if we are asked
    2461             :  * to compare an enum value that is not already in the cache.  This is okay
    2462             :  * because there is no support for re-ordering existing values, so comparisons
    2463             :  * of previously cached values will return the right answer even if other
    2464             :  * values have been added since we last loaded the cache.
    2465             :  *
    2466             :  * Note: the enum logic has a special-case rule about even-numbered versus
    2467             :  * odd-numbered OIDs, but we take no account of that rule here; this
    2468             :  * routine shouldn't even get called when that rule applies.
    2469             :  */
    2470             : int
    2471      150052 : compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
    2472             : {
    2473             :     TypeCacheEnumData *enumdata;
    2474             :     EnumItem   *item1;
    2475             :     EnumItem   *item2;
    2476             : 
    2477             :     /*
    2478             :      * Equal OIDs are certainly equal --- this case was probably handled by
    2479             :      * our caller, but we may as well check.
    2480             :      */
    2481      150052 :     if (arg1 == arg2)
    2482           0 :         return 0;
    2483             : 
    2484             :     /* Load up the cache if first time through */
    2485      150052 :     if (tcache->enumData == NULL)
    2486           6 :         load_enum_cache_data(tcache);
    2487      150052 :     enumdata = tcache->enumData;
    2488             : 
    2489             :     /*
    2490             :      * If both OIDs are known-sorted, we can just compare them directly.
    2491             :      */
    2492      300080 :     if (enum_known_sorted(enumdata, arg1) &&
    2493      150028 :         enum_known_sorted(enumdata, arg2))
    2494             :     {
    2495           0 :         if (arg1 < arg2)
    2496           0 :             return -1;
    2497             :         else
    2498           0 :             return 1;
    2499             :     }
    2500             : 
    2501             :     /*
    2502             :      * Slow path: we have to identify their actual sort-order positions.
    2503             :      */
    2504      150052 :     item1 = find_enumitem(enumdata, arg1);
    2505      150052 :     item2 = find_enumitem(enumdata, arg2);
    2506             : 
    2507      150052 :     if (item1 == NULL || item2 == NULL)
    2508             :     {
    2509             :         /*
    2510             :          * We couldn't find one or both values.  That means the enum has
    2511             :          * changed under us, so re-initialize the cache and try again. We
    2512             :          * don't bother retrying the known-sorted case in this path.
    2513             :          */
    2514           0 :         load_enum_cache_data(tcache);
    2515           0 :         enumdata = tcache->enumData;
    2516             : 
    2517           0 :         item1 = find_enumitem(enumdata, arg1);
    2518           0 :         item2 = find_enumitem(enumdata, arg2);
    2519             : 
    2520             :         /*
    2521             :          * If we still can't find the values, complain: we must have corrupt
    2522             :          * data.
    2523             :          */
    2524           0 :         if (item1 == NULL)
    2525           0 :             elog(ERROR, "enum value %u not found in cache for enum %s",
    2526             :                  arg1, format_type_be(tcache->type_id));
    2527           0 :         if (item2 == NULL)
    2528           0 :             elog(ERROR, "enum value %u not found in cache for enum %s",
    2529             :                  arg2, format_type_be(tcache->type_id));
    2530             :     }
    2531             : 
    2532      150052 :     if (item1->sort_order < item2->sort_order)
    2533       50016 :         return -1;
    2534      100036 :     else if (item1->sort_order > item2->sort_order)
    2535      100036 :         return 1;
    2536             :     else
    2537           0 :         return 0;
    2538             : }
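
/*
 * Illustrative sketch, not part of typcache.c proper: driving an enum
 * comparison through the typcache.  The real callers are the enum comparison
 * operators in enum.c, which also exploit the even-numbered-OID fast path
 * mentioned above and cache the TypeCacheEntry pointer across calls; the
 * function and argument names here are hypothetical.
 */
static int
compare_enum_members(Oid enum_type_oid, Oid val1, Oid val2)
{
    TypeCacheEntry *tcache = lookup_type_cache(enum_type_oid, 0);

    if (val1 == val2)
        return 0;
    return compare_values_of_enum(tcache, val1, val2);
}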
    2539             : 
    2540             : /*
    2541             :  * Load (or re-load) the enumData member of the typcache entry.
    2542             :  */
    2543             : static void
    2544           6 : load_enum_cache_data(TypeCacheEntry *tcache)
    2545             : {
    2546             :     TypeCacheEnumData *enumdata;
    2547             :     Relation    enum_rel;
    2548             :     SysScanDesc enum_scan;
    2549             :     HeapTuple   enum_tuple;
    2550             :     ScanKeyData skey;
    2551             :     EnumItem   *items;
    2552             :     int         numitems;
    2553             :     int         maxitems;
    2554             :     Oid         bitmap_base;
    2555             :     Bitmapset  *bitmap;
    2556             :     MemoryContext oldcxt;
    2557             :     int         bm_size,
    2558             :                 start_pos;
    2559             : 
    2560             :     /* Check that this is actually an enum */
    2561           6 :     if (tcache->typtype != TYPTYPE_ENUM)
    2562           0 :         ereport(ERROR,
    2563             :                 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
    2564             :                  errmsg("%s is not an enum",
    2565             :                         format_type_be(tcache->type_id))));
    2566             : 
    2567             :     /*
    2568             :      * Read all the information for members of the enum type.  We collect the
    2569             :      * info in working memory in the caller's context, and then transfer it to
    2570             :      * permanent memory in CacheMemoryContext.  This minimizes the risk of
    2571             :      * leaking memory from CacheMemoryContext in the event of an error partway
    2572             :      * through.
    2573             :      */
    2574           6 :     maxitems = 64;
    2575           6 :     items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
    2576           6 :     numitems = 0;
    2577             : 
    2578             :     /* Scan pg_enum for the members of the target enum type. */
    2579           6 :     ScanKeyInit(&skey,
    2580             :                 Anum_pg_enum_enumtypid,
    2581             :                 BTEqualStrategyNumber, F_OIDEQ,
    2582           6 :                 ObjectIdGetDatum(tcache->type_id));
    2583             : 
    2584           6 :     enum_rel = table_open(EnumRelationId, AccessShareLock);
    2585           6 :     enum_scan = systable_beginscan(enum_rel,
    2586             :                                    EnumTypIdLabelIndexId,
    2587             :                                    true, NULL,
    2588             :                                    1, &skey);
    2589             : 
    2590          46 :     while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
    2591             :     {
    2592          40 :         Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);
    2593             : 
    2594          40 :         if (numitems >= maxitems)
    2595             :         {
    2596           0 :             maxitems *= 2;
    2597           0 :             items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
    2598             :         }
    2599          40 :         items[numitems].enum_oid = en->oid;
    2600          40 :         items[numitems].sort_order = en->enumsortorder;
    2601          40 :         numitems++;
    2602             :     }
    2603             : 
    2604           6 :     systable_endscan(enum_scan);
    2605           6 :     table_close(enum_rel, AccessShareLock);
    2606             : 
    2607             :     /* Sort the items into OID order */
    2608           6 :     qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
    2609             : 
    2610             :     /*
    2611             :      * Here, we create a bitmap listing a subset of the enum's OIDs that are
    2612             :      * known to be in order and can thus be compared with just OID comparison.
    2613             :      *
    2614             :      * The point of this is that the enum's initial OIDs were certainly in
    2615             :      * order, so there is some subset that can be compared via OID comparison;
    2616             :      * and we'd rather not do binary searches unnecessarily.
    2617             :      *
    2618             :      * This is somewhat heuristic, and might identify a subset of OIDs that
    2619             :      * isn't exactly what the type started with.  That's okay as long as the
    2620             :      * subset is correctly sorted.
    2621             :      */
    2622           6 :     bitmap_base = InvalidOid;
    2623           6 :     bitmap = NULL;
    2624           6 :     bm_size = 1;                /* only save sets of at least 2 OIDs */
    2625             : 
    2626          14 :     for (start_pos = 0; start_pos < numitems - 1; start_pos++)
    2627             :     {
    2628             :         /*
    2629             :          * Identify longest sorted subsequence starting at start_pos
    2630             :          */
    2631          14 :         Bitmapset  *this_bitmap = bms_make_singleton(0);
    2632          14 :         int         this_bm_size = 1;
    2633          14 :         Oid         start_oid = items[start_pos].enum_oid;
    2634          14 :         float4      prev_order = items[start_pos].sort_order;
    2635             :         int         i;
    2636             : 
    2637          92 :         for (i = start_pos + 1; i < numitems; i++)
    2638             :         {
    2639             :             Oid         offset;
    2640             : 
    2641          78 :             offset = items[i].enum_oid - start_oid;
    2642             :             /* quit if bitmap would be too large; cutoff is arbitrary */
    2643          78 :             if (offset >= 8192)
    2644           0 :                 break;
    2645             :             /* include the item if it's in-order */
    2646          78 :             if (items[i].sort_order > prev_order)
    2647             :             {
    2648          40 :                 prev_order = items[i].sort_order;
    2649          40 :                 this_bitmap = bms_add_member(this_bitmap, (int) offset);
    2650          40 :                 this_bm_size++;
    2651             :             }
    2652             :         }
    2653             : 
    2654             :         /* Remember it if larger than previous best */
    2655          14 :         if (this_bm_size > bm_size)
    2656             :         {
    2657           6 :             bms_free(bitmap);
    2658           6 :             bitmap_base = start_oid;
    2659           6 :             bitmap = this_bitmap;
    2660           6 :             bm_size = this_bm_size;
    2661             :         }
    2662             :         else
    2663           8 :             bms_free(this_bitmap);
    2664             : 
    2665             :         /*
    2666             :          * Done if it's not possible to find a longer sequence in the rest of
    2667             :          * the list.  In typical cases this will happen on the first
    2668             :          * iteration, which is why we create the bitmaps on the fly instead of
    2669             :          * doing a second pass over the list.
    2670             :          */
    2671          14 :         if (bm_size >= (numitems - start_pos - 1))
    2672           6 :             break;
    2673             :     }
    2674             : 
    2675             :     /* OK, copy the data into CacheMemoryContext */
    2676           6 :     oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
    2677             :     enumdata = (TypeCacheEnumData *)
    2678           6 :         palloc(offsetof(TypeCacheEnumData, enum_values) +
    2679           6 :                numitems * sizeof(EnumItem));
    2680           6 :     enumdata->bitmap_base = bitmap_base;
    2681           6 :     enumdata->sorted_values = bms_copy(bitmap);
    2682           6 :     enumdata->num_values = numitems;
    2683           6 :     memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
    2684           6 :     MemoryContextSwitchTo(oldcxt);
    2685             : 
    2686           6 :     pfree(items);
    2687           6 :     bms_free(bitmap);
    2688             : 
    2689             :     /* And link the finished cache struct into the typcache */
    2690           6 :     if (tcache->enumData != NULL)
    2691           0 :         pfree(tcache->enumData);
    2692           6 :     tcache->enumData = enumdata;
    2693           6 : }
    2694             : 
    2695             : /*
    2696             :  * Locate the EnumItem with the given OID, if present
    2697             :  */
    2698             : static EnumItem *
    2699      300104 : find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
    2700             : {
    2701             :     EnumItem    srch;
    2702             : 
    2703             :     /* On some versions of Solaris, bsearch of zero items dumps core */
    2704      300104 :     if (enumdata->num_values <= 0)
    2705           0 :         return NULL;
    2706             : 
    2707      300104 :     srch.enum_oid = arg;
    2708      300104 :     return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
    2709             :                    sizeof(EnumItem), enum_oid_cmp);
    2710             : }
    2711             : 
    2712             : /*
    2713             :  * qsort comparison function for OID-ordered EnumItems
    2714             :  */
    2715             : static int
    2716      600360 : enum_oid_cmp(const void *left, const void *right)
    2717             : {
    2718      600360 :     const EnumItem *l = (const EnumItem *) left;
    2719      600360 :     const EnumItem *r = (const EnumItem *) right;
    2720             : 
    2721      600360 :     if (l->enum_oid < r->enum_oid)
    2722      150132 :         return -1;
    2723      450228 :     else if (l->enum_oid > r->enum_oid)
    2724      150124 :         return 1;
    2725             :     else
    2726      300104 :         return 0;
    2727             : }
    2728             : 
    2729             : /*
    2730             :  * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
    2731             :  * to the given value and return a dsa_pointer.
    2732             :  */
    2733             : static dsa_pointer
    2734          36 : share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
    2735             : {
    2736             :     dsa_pointer shared_dp;
    2737             :     TupleDesc   shared;
    2738             : 
    2739          36 :     shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
    2740          36 :     shared = (TupleDesc) dsa_get_address(area, shared_dp);
    2741          36 :     TupleDescCopy(shared, tupdesc);
    2742          36 :     shared->tdtypmod = typmod;
    2743             : 
    2744          36 :     return shared_dp;
    2745             : }
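
/*
 * Illustrative sketch, not part of typcache.c proper: the same
 * flatten-and-copy idiom applied to backend-local memory, using the
 * TupleDescSize / TupleDescCopy pair that share_tupledesc applies to DSA
 * memory.  As with a shared descriptor, the copy ends up with
 * tdrefcount == -1, i.e. not reference-counted.  The function name is
 * hypothetical.
 */
static TupleDesc
flat_copy_tupledesc(TupleDesc tupdesc)
{
    TupleDesc   flat = (TupleDesc) palloc(TupleDescSize(tupdesc));

    TupleDescCopy(flat, tupdesc);
    return flat;
}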
    2746             : 
    2747             : /*
    2748             :  * If we are attached to a SharedRecordTypmodRegistry, use it to find or
    2749             :  * create a shared TupleDesc that matches 'tupdesc'.  Otherwise return NULL.
    2750             :  * Tuple descriptors returned by this function are not reference counted, and
    2751             :  * will exist at least as long as the current backend remained attached to the
    2752             :  * will exist at least as long as the current backend remains attached to the
    2753             :  */
    2754             : static TupleDesc
    2755       22000 : find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
    2756             : {
    2757             :     TupleDesc   result;
    2758             :     SharedRecordTableKey key;
    2759             :     SharedRecordTableEntry *record_table_entry;
    2760             :     SharedTypmodTableEntry *typmod_table_entry;
    2761             :     dsa_pointer shared_dp;
    2762             :     bool        found;
    2763             :     uint32      typmod;
    2764             : 
    2765             :     /* If not even attached, nothing to do. */
    2766       22000 :     if (CurrentSession->shared_typmod_registry == NULL)
    2767       21964 :         return NULL;
    2768             : 
    2769             :     /* Try to find a matching tuple descriptor in the record table. */
    2770          36 :     key.shared = false;
    2771          36 :     key.u.local_tupdesc = tupdesc;
    2772             :     record_table_entry = (SharedRecordTableEntry *)
    2773          36 :         dshash_find(CurrentSession->shared_record_table, &key, false);
    2774          36 :     if (record_table_entry)
    2775             :     {
    2776             :         Assert(record_table_entry->key.shared);
    2777           4 :         dshash_release_lock(CurrentSession->shared_record_table,
    2778             :                             record_table_entry);
    2779             :         result = (TupleDesc)
    2780           4 :             dsa_get_address(CurrentSession->area,
    2781             :                             record_table_entry->key.u.shared_tupdesc);
    2782             :         Assert(result->tdrefcount == -1);
    2783             : 
    2784           4 :         return result;
    2785             :     }
    2786             : 
    2787             :     /* Allocate a new typmod number.  This will be wasted if we error out. */
    2788          32 :     typmod = (int)
    2789          32 :         pg_atomic_fetch_add_u32(&CurrentSession->shared_typmod_registry->next_typmod,
    2790             :                                 1);
    2791             : 
    2792             :     /* Copy the TupleDesc into shared memory. */
    2793          32 :     shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
    2794             : 
    2795             :     /*
    2796             :      * Create an entry in the typmod table so that others will understand this
    2797             :      * typmod number.
    2798             :      */
    2799          32 :     PG_TRY();
    2800             :     {
    2801             :         typmod_table_entry = (SharedTypmodTableEntry *)
    2802          32 :             dshash_find_or_insert(CurrentSession->shared_typmod_table,
    2803             :                                   &typmod, &found);
    2804          32 :         if (found)
    2805           0 :             elog(ERROR, "cannot create duplicate shared record typmod");
    2806             :     }
    2807           0 :     PG_CATCH();
    2808             :     {
    2809           0 :         dsa_free(CurrentSession->area, shared_dp);
    2810           0 :         PG_RE_THROW();
    2811             :     }
    2812          32 :     PG_END_TRY();
    2813          32 :     typmod_table_entry->typmod = typmod;
    2814          32 :     typmod_table_entry->shared_tupdesc = shared_dp;
    2815          32 :     dshash_release_lock(CurrentSession->shared_typmod_table,
    2816             :                         typmod_table_entry);
    2817             : 
    2818             :     /*
    2819             :      * Finally create an entry in the record table so others with matching
    2820             :      * tuple descriptors can reuse the typmod.
    2821             :      */
    2822             :     record_table_entry = (SharedRecordTableEntry *)
    2823          32 :         dshash_find_or_insert(CurrentSession->shared_record_table, &key,
    2824             :                               &found);
    2825          32 :     if (found)
    2826             :     {
    2827             :         /*
    2828             :          * Someone concurrently inserted a matching tuple descriptor since the
    2829             :          * first time we checked.  Use that one instead.
    2830             :          */
    2831           0 :         dshash_release_lock(CurrentSession->shared_record_table,
    2832             :                             record_table_entry);
    2833             : 
    2834             :         /* Might as well free up the space used by the one we created. */
    2835           0 :         found = dshash_delete_key(CurrentSession->shared_typmod_table,
    2836             :                                   &typmod);
    2837             :         Assert(found);
    2838           0 :         dsa_free(CurrentSession->area, shared_dp);
    2839             : 
    2840             :         /* Return the one we found. */
    2841             :         Assert(record_table_entry->key.shared);
    2842             :         result = (TupleDesc)
    2843           0 :             dsa_get_address(CurrentSession->area,
    2844             :                             record_table_entry->key.u.shared_tupdesc);
    2845             :         Assert(result->tdrefcount == -1);
    2846             : 
    2847           0 :         return result;
    2848             :     }
    2849             : 
    2850             :     /* Store it and return it. */
    2851          32 :     record_table_entry->key.shared = true;
    2852          32 :     record_table_entry->key.u.shared_tupdesc = shared_dp;
    2853          32 :     dshash_release_lock(CurrentSession->shared_record_table,
    2854             :                         record_table_entry);
    2855             :     result = (TupleDesc)
    2856          32 :         dsa_get_address(CurrentSession->area, shared_dp);
    2857             :     Assert(result->tdrefcount == -1);
    2858             : 
    2859          32 :     return result;
    2860             : }
    2861             : 
    2862             : /*
    2863             :  * On-DSM-detach hook to forget about the current shared record typmod
    2864             :  * infrastructure.  This is currently used by both leader and workers.
    2865             :  */
    2866             : static void
    2867        1764 : shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
    2868             : {
    2869             :     /* Be cautious here: maybe we didn't finish initializing. */
    2870        1764 :     if (CurrentSession->shared_record_table != NULL)
    2871             :     {
    2872        1764 :         dshash_detach(CurrentSession->shared_record_table);
    2873        1764 :         CurrentSession->shared_record_table = NULL;
    2874             :     }
    2875        1764 :     if (CurrentSession->shared_typmod_table != NULL)
    2876             :     {
    2877        1764 :         dshash_detach(CurrentSession->shared_typmod_table);
    2878        1764 :         CurrentSession->shared_typmod_table = NULL;
    2879             :     }
    2880        1764 :     CurrentSession->shared_typmod_registry = NULL;
    2881        1764 : }
