LCOV - code coverage report
Current view: top level - src/backend/utils/cache - typcache.c
Test:         PostgreSQL 13devel
Date:         2019-08-24 15:07:19
Coverage:     Lines: 709 of 833 (85.1 %)    Functions: 46 of 48 (95.8 %)

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * typcache.c
       4             :  *    POSTGRES type cache code
       5             :  *
       6             :  * The type cache exists to speed lookup of certain information about data
       7             :  * types that is not directly available from a type's pg_type row.  For
       8             :  * example, we use a type's default btree opclass, or the default hash
       9             :  * opclass if no btree opclass exists, to determine which operators should
      10             :  * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
      11             :  *
      12             :  * Several seemingly-odd choices have been made to support use of the type
      13             :  * cache by generic array and record handling routines, such as array_eq(),
      14             :  * record_cmp(), and hash_array().  Because those routines are used as index
      15             :  * support operations, they cannot leak memory.  To allow them to execute
      16             :  * efficiently, all information that they would like to re-use across calls
      17             :  * is kept in the type cache.
      18             :  *
      19             :  * Once created, a type cache entry lives as long as the backend does, so
      20             :  * there is no need for a call to release a cache entry.  If the type is
      21             :  * dropped, the cache entry simply becomes wasted storage.  This is not
      22             :  * expected to happen often, and assuming that typcache entries are good
      23             :  * permanently allows caching pointers to them in long-lived places.
      24             :  *
      25             :  * We have some provisions for updating cache entries if the stored data
      26             :  * becomes obsolete.  Information dependent on opclasses is cleared if we
      27             :  * detect updates to pg_opclass.  We also support clearing the tuple
      28             :  * descriptor and operator/function parts of a rowtype's cache entry,
      29             :  * since those may need to change as a consequence of ALTER TABLE.
      30             :  * Domain constraint changes are also tracked properly.
      31             :  *
      32             :  *
      33             :  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
      34             :  * Portions Copyright (c) 1994, Regents of the University of California
      35             :  *
      36             :  * IDENTIFICATION
      37             :  *    src/backend/utils/cache/typcache.c
      38             :  *
      39             :  *-------------------------------------------------------------------------
      40             :  */
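The cross-call reuse described in the header comment boils down to callers such as array_eq() asking for pre-looked-up FmgrInfo data instead of re-resolving the equality operator on every call. A minimal sketch of that pattern, assuming an element type OID and two element Datums are already at hand (the wrapper name is invented for illustration):

    /* Illustrative only: reuse the cached equality FmgrInfo across calls. */
    static bool
    elements_equal(Oid element_type, Datum a, Datum b)
    {
        TypeCacheEntry *typentry;

        typentry = lookup_type_cache(element_type, TYPECACHE_EQ_OPR_FINFO);
        if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
            elog(ERROR, "could not identify an equality operator for type %u",
                 element_type);

        /* The FmgrInfo lives in the cache, so there is no per-call lookup or leak. */
        return DatumGetBool(FunctionCall2Coll(&typentry->eq_opr_finfo,
                                              typentry->typcollation,
                                              a, b));
    }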
      41             : #include "postgres.h"
      42             : 
      43             : #include <limits.h>
      44             : 
      45             : #include "access/hash.h"
      46             : #include "access/htup_details.h"
      47             : #include "access/nbtree.h"
      48             : #include "access/parallel.h"
      49             : #include "access/relation.h"
      50             : #include "access/session.h"
      51             : #include "access/table.h"
      52             : #include "catalog/indexing.h"
      53             : #include "catalog/pg_am.h"
      54             : #include "catalog/pg_constraint.h"
      55             : #include "catalog/pg_enum.h"
      56             : #include "catalog/pg_operator.h"
      57             : #include "catalog/pg_range.h"
      58             : #include "catalog/pg_type.h"
      59             : #include "commands/defrem.h"
      60             : #include "executor/executor.h"
      61             : #include "lib/dshash.h"
      62             : #include "optimizer/optimizer.h"
      63             : #include "storage/lwlock.h"
      64             : #include "utils/builtins.h"
      65             : #include "utils/catcache.h"
      66             : #include "utils/fmgroids.h"
      67             : #include "utils/inval.h"
      68             : #include "utils/lsyscache.h"
      69             : #include "utils/memutils.h"
      70             : #include "utils/rel.h"
      71             : #include "utils/snapmgr.h"
      72             : #include "utils/syscache.h"
      73             : #include "utils/typcache.h"
      74             : 
      75             : 
      76             : /* The main type cache hashtable searched by lookup_type_cache */
      77             : static HTAB *TypeCacheHash = NULL;
      78             : 
      79             : /* List of type cache entries for domain types */
      80             : static TypeCacheEntry *firstDomainTypeEntry = NULL;
      81             : 
      82             : /* Private flag bits in the TypeCacheEntry.flags field */
      83             : #define TCFLAGS_CHECKED_BTREE_OPCLASS       0x000001
      84             : #define TCFLAGS_CHECKED_HASH_OPCLASS        0x000002
      85             : #define TCFLAGS_CHECKED_EQ_OPR              0x000004
      86             : #define TCFLAGS_CHECKED_LT_OPR              0x000008
      87             : #define TCFLAGS_CHECKED_GT_OPR              0x000010
      88             : #define TCFLAGS_CHECKED_CMP_PROC            0x000020
      89             : #define TCFLAGS_CHECKED_HASH_PROC           0x000040
      90             : #define TCFLAGS_CHECKED_HASH_EXTENDED_PROC  0x000080
      91             : #define TCFLAGS_CHECKED_ELEM_PROPERTIES     0x000100
      92             : #define TCFLAGS_HAVE_ELEM_EQUALITY          0x000200
      93             : #define TCFLAGS_HAVE_ELEM_COMPARE           0x000400
      94             : #define TCFLAGS_HAVE_ELEM_HASHING           0x000800
      95             : #define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING  0x001000
      96             : #define TCFLAGS_CHECKED_FIELD_PROPERTIES    0x002000
      97             : #define TCFLAGS_HAVE_FIELD_EQUALITY         0x004000
      98             : #define TCFLAGS_HAVE_FIELD_COMPARE          0x008000
      99             : #define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS  0x010000
     100             : #define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE    0x020000
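These bits follow a consistent two-level idiom: a TCFLAGS_CHECKED_* bit records that a property has been probed at least once, and a companion TCFLAGS_HAVE_* bit caches the answer. Roughly how the array-element helpers declared further down apply it (a sketch of the pattern, not necessarily the file's exact body):

    static bool
    array_element_has_equality(TypeCacheEntry *typentry)
    {
        /* Probe the element type's properties only once per cache entry. */
        if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
            cache_array_element_properties(typentry);

        /* Afterwards the HAVE bit holds the cached yes/no answer. */
        return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
    }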
     101             : 
     102             : /*
     103             :  * Data stored about a domain type's constraints.  Note that we do not create
     104             :  * this struct for the common case of a constraint-less domain; we just set
     105             :  * domainData to NULL to indicate that.
     106             :  *
     107             :  * Within a DomainConstraintCache, we store expression plan trees, but the
     108             :  * check_exprstate fields of the DomainConstraintState nodes are just NULL.
     109             :  * When needed, expression evaluation nodes are built by flat-copying the
     110             :  * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
     111             :  * Such a node tree is not part of the DomainConstraintCache, but is
     112             :  * considered to belong to a DomainConstraintRef.
     113             :  */
     114             : struct DomainConstraintCache
     115             : {
     116             :     List       *constraints;    /* list of DomainConstraintState nodes */
     117             :     MemoryContext dccContext;   /* memory context holding all associated data */
     118             :     long        dccRefCount;    /* number of references to this struct */
     119             : };
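Because several DomainConstraintRefs can point at one DomainConstraintCache, dccRefCount decides when dccContext, and everything parked in it, may be released. A hedged sketch of that release step (decr_dcc_refcount is only declared in this excerpt; its real body may differ in detail):

    static void
    decr_dcc_refcount(DomainConstraintCache *dcc)
    {
        Assert(dcc->dccRefCount > 0);

        /* Dropping the last reference frees the constraint trees with their context. */
        if (--dcc->dccRefCount <= 0)
            MemoryContextDelete(dcc->dccContext);
    }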
     120             : 
     121             : /* Private information to support comparisons of enum values */
     122             : typedef struct
     123             : {
     124             :     Oid         enum_oid;       /* OID of one enum value */
     125             :     float4      sort_order;     /* its sort position */
     126             : } EnumItem;
     127             : 
     128             : typedef struct TypeCacheEnumData
     129             : {
     130             :     Oid         bitmap_base;    /* OID corresponding to bit 0 of bitmapset */
     131             :     Bitmapset  *sorted_values;  /* Set of OIDs known to be in order */
     132             :     int         num_values;     /* total number of values in enum */
     133             :     EnumItem    enum_values[FLEXIBLE_ARRAY_MEMBER];
     134             : } TypeCacheEnumData;
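enum_values[] is kept sorted by OID, so find_enumitem can resolve an OID to its float4 sort_order with a binary search, and sorted_values lets most comparisons skip even that. A sketch of the bsearch-based lookup, assuming that sort order of the array (both helpers are declared below; their exact bodies may differ):

    static int
    enum_oid_cmp(const void *left, const void *right)
    {
        const EnumItem *l = (const EnumItem *) left;
        const EnumItem *r = (const EnumItem *) right;

        if (l->enum_oid < r->enum_oid)
            return -1;
        if (l->enum_oid > r->enum_oid)
            return 1;
        return 0;
    }

    static EnumItem *
    find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
    {
        EnumItem    srch;

        /* Guard against bsearch over an empty array. */
        if (enumdata->num_values <= 0)
            return NULL;

        srch.enum_oid = arg;
        return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
                       sizeof(EnumItem), enum_oid_cmp);
    }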
     135             : 
     136             : /*
     137             :  * We use a separate table for storing the definitions of non-anonymous
     138             :  * record types.  Once defined, a record type will be remembered for the
     139             :  * life of the backend.  Subsequent uses of the "same" record type (where
     140             :  * sameness means equalTupleDescs) will refer to the existing table entry.
     141             :  *
     142             :  * Stored record types are remembered in a linear array of TupleDescs,
     143             :  * which can be indexed quickly with the assigned typmod.  There is also
     144             :  * a hash table to speed searches for matching TupleDescs.
     145             :  */
     146             : 
     147             : typedef struct RecordCacheEntry
     148             : {
     149             :     TupleDesc   tupdesc;
     150             : } RecordCacheEntry;
     151             : 
     152             : /*
     153             :  * To deal with non-anonymous record types that are exchanged by backends
     154             :  * involved in a parallel query, we also need a shared version of the above.
     155             :  */
     156             : struct SharedRecordTypmodRegistry
     157             : {
     158             :     /* A hash table for finding a matching TupleDesc. */
     159             :     dshash_table_handle record_table_handle;
     160             :     /* A hash table for finding a TupleDesc by typmod. */
     161             :     dshash_table_handle typmod_table_handle;
     162             :     /* A source of new record typmod numbers. */
     163             :     pg_atomic_uint32 next_typmod;
     164             : };
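next_typmod is the registry's only mutable counter, so new record typmods can be handed out to any backend in the session without taking a lock. A minimal sketch of claiming one (the helper name is invented for illustration):

    /* Illustrative only: atomically reserve the next shared record typmod. */
    static uint32
    claim_record_typmod(SharedRecordTypmodRegistry *registry)
    {
        return pg_atomic_fetch_add_u32(&registry->next_typmod, 1);
    }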
     165             : 
     166             : /*
     167             :  * When using shared tuple descriptors as hash table keys we need a way to be
     168             :  * able to search for an equal shared TupleDesc using a backend-local
     169             :  * TupleDesc.  So we use this type which can hold either, and hash and compare
     170             :  * functions that know how to handle both.
     171             :  */
     172             : typedef struct SharedRecordTableKey
     173             : {
     174             :     union
     175             :     {
     176             :         TupleDesc   local_tupdesc;
     177             :         dsa_pointer shared_tupdesc;
     178             :     }           u;
     179             :     bool        shared;
     180             : } SharedRecordTableKey;
     181             : 
     182             : /*
     183             :  * The shared version of RecordCacheEntry.  This lets us look up a typmod
     184             :  * using a TupleDesc which may be in local or shared memory.
     185             :  */
     186             : typedef struct SharedRecordTableEntry
     187             : {
     188             :     SharedRecordTableKey key;
     189             : } SharedRecordTableEntry;
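In practice this means a backend can probe the shared table with a descriptor it only has locally: it builds a key with shared = false and lets the compare and hash callbacks below resolve either representation. A sketch of such a probe, assuming record_table is an attached dshash table and tupdesc a backend-local TupleDesc:

    SharedRecordTableKey key;
    SharedRecordTableEntry *entry;

    key.shared = false;
    key.u.local_tupdesc = tupdesc;      /* backend-local descriptor */

    /* NULL if no matching shared TupleDesc has been registered yet; a found
     * entry stays locked until dshash_release_lock() is called. */
    entry = (SharedRecordTableEntry *) dshash_find(record_table, &key, false);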
     190             : 
     191             : /*
     192             :  * An entry in SharedRecordTypmodRegistry's typmod table.  This lets us look
     193             :  * up a TupleDesc in shared memory using a typmod.
     194             :  */
     195             : typedef struct SharedTypmodTableEntry
     196             : {
     197             :     uint32      typmod;
     198             :     dsa_pointer shared_tupdesc;
     199             : } SharedTypmodTableEntry;
     200             : 
     201             : /*
     202             :  * A comparator function for SharedRecordTableKey.
     203             :  */
     204             : static int
     205          24 : shared_record_table_compare(const void *a, const void *b, size_t size,
     206             :                             void *arg)
     207             : {
     208          24 :     dsa_area   *area = (dsa_area *) arg;
     209          24 :     SharedRecordTableKey *k1 = (SharedRecordTableKey *) a;
     210          24 :     SharedRecordTableKey *k2 = (SharedRecordTableKey *) b;
     211             :     TupleDesc   t1;
     212             :     TupleDesc   t2;
     213             : 
     214          24 :     if (k1->shared)
     215           0 :         t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
     216             :     else
     217          24 :         t1 = k1->u.local_tupdesc;
     218             : 
     219          24 :     if (k2->shared)
     220          24 :         t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
     221             :     else
     222           0 :         t2 = k2->u.local_tupdesc;
     223             : 
     224          24 :     return equalTupleDescs(t1, t2) ? 0 : 1;
     225             : }
     226             : 
     227             : /*
     228             :  * A hash function for SharedRecordTableKey.
     229             :  */
     230             : static uint32
     231          76 : shared_record_table_hash(const void *a, size_t size, void *arg)
     232             : {
     233          76 :     dsa_area   *area = (dsa_area *) arg;
     234          76 :     SharedRecordTableKey *k = (SharedRecordTableKey *) a;
     235             :     TupleDesc   t;
     236             : 
     237          76 :     if (k->shared)
     238           0 :         t = (TupleDesc) dsa_get_address(area, k->u.shared_tupdesc);
     239             :     else
     240          76 :         t = k->u.local_tupdesc;
     241             : 
     242          76 :     return hashTupleDesc(t);
     243             : }
     244             : 
     245             : /* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
     246             : static const dshash_parameters srtr_record_table_params = {
     247             :     sizeof(SharedRecordTableKey),   /* unused */
     248             :     sizeof(SharedRecordTableEntry),
     249             :     shared_record_table_compare,
     250             :     shared_record_table_hash,
     251             :     LWTRANCHE_SESSION_RECORD_TABLE
     252             : };
     253             : 
     254             : /* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
     255             : static const dshash_parameters srtr_typmod_table_params = {
     256             :     sizeof(uint32),
     257             :     sizeof(SharedTypmodTableEntry),
     258             :     dshash_memcmp,
     259             :     dshash_memhash,
     260             :     LWTRANCHE_SESSION_TYPMOD_TABLE
     261             : };
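These parameter blocks are handed to dshash when the shared tables are created by the first backend and attached by the others; the trailing arg is the dsa_area that the compare and hash callbacks above expect. A hedged sketch of both sides (variable names are illustrative):

    /* Creating side: build the typmod table in the session DSA area and publish it. */
    dshash_table *typmod_table = dshash_create(area, &srtr_typmod_table_params, area);
    registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);

    /* Attaching side: map the same table from another backend via the handle. */
    dshash_table *attached = dshash_attach(area, &srtr_typmod_table_params,
                                           registry->typmod_table_handle, area);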
     262             : 
     263             : /* hashtable for recognizing registered record types */
     264             : static HTAB *RecordCacheHash = NULL;
     265             : 
     266             : /* arrays of info about registered record types, indexed by assigned typmod */
     267             : static TupleDesc *RecordCacheArray = NULL;
     268             : static uint64 *RecordIdentifierArray = NULL;
     269             : static int32 RecordCacheArrayLen = 0;   /* allocated length of above arrays */
     270             : static int32 NextRecordTypmod = 0;  /* number of entries used */
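Given an assigned typmod, the common case is a direct array fetch; the hash table exists only to find whether an equal TupleDesc was registered before. A sketch of the array side with a hypothetical helper name (the real lookups live further down in this file):

    /* Illustrative only: resolve a locally assigned record typmod to its TupleDesc. */
    static TupleDesc
    get_local_record_tupdesc(int32 typmod)
    {
        if (typmod >= 0 && typmod < NextRecordTypmod)
            return RecordCacheArray[typmod];
        return NULL;            /* not (yet) known to this backend */
    }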
     271             : 
     272             : /*
     273             :  * Process-wide counter for generating unique tupledesc identifiers.
     274             :  * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
     275             :  * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
     276             :  */
     277             : static uint64 tupledesc_id_counter = INVALID_TUPLEDESC_IDENTIFIER;
     278             : 
     279             : static void load_typcache_tupdesc(TypeCacheEntry *typentry);
     280             : static void load_rangetype_info(TypeCacheEntry *typentry);
     281             : static void load_domaintype_info(TypeCacheEntry *typentry);
     282             : static int  dcs_cmp(const void *a, const void *b);
     283             : static void decr_dcc_refcount(DomainConstraintCache *dcc);
     284             : static void dccref_deletion_callback(void *arg);
     285             : static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
     286             : static bool array_element_has_equality(TypeCacheEntry *typentry);
     287             : static bool array_element_has_compare(TypeCacheEntry *typentry);
     288             : static bool array_element_has_hashing(TypeCacheEntry *typentry);
     289             : static bool array_element_has_extended_hashing(TypeCacheEntry *typentry);
     290             : static void cache_array_element_properties(TypeCacheEntry *typentry);
     291             : static bool record_fields_have_equality(TypeCacheEntry *typentry);
     292             : static bool record_fields_have_compare(TypeCacheEntry *typentry);
     293             : static void cache_record_field_properties(TypeCacheEntry *typentry);
     294             : static bool range_element_has_hashing(TypeCacheEntry *typentry);
     295             : static bool range_element_has_extended_hashing(TypeCacheEntry *typentry);
     296             : static void cache_range_element_properties(TypeCacheEntry *typentry);
     297             : static void TypeCacheRelCallback(Datum arg, Oid relid);
     298             : static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
     299             : static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
     300             : static void load_enum_cache_data(TypeCacheEntry *tcache);
     301             : static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
     302             : static int  enum_oid_cmp(const void *left, const void *right);
     303             : static void shared_record_typmod_registry_detach(dsm_segment *segment,
     304             :                                                  Datum datum);
     305             : static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc);
     306             : static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
     307             :                                    uint32 typmod);
     308             : 
     309             : 
     310             : /*
     311             :  * lookup_type_cache
     312             :  *
     313             :  * Fetch the type cache entry for the specified datatype, and make sure that
     314             :  * all the fields requested by bits in 'flags' are valid.
     315             :  *
     316             :  * The result is never NULL --- we will ereport() if the passed type OID is
     317             :  * invalid.  Note however that we may fail to find one or more of the
     318             :  * values requested by 'flags'; the caller needs to check whether the fields
     319             :  * are InvalidOid or not.
     320             :  */
     321             : TypeCacheEntry *
     322      824622 : lookup_type_cache(Oid type_id, int flags)
     323             : {
     324             :     TypeCacheEntry *typentry;
     325             :     bool        found;
     326             : 
     327      824622 :     if (TypeCacheHash == NULL)
     328             :     {
     329             :         /* First time through: initialize the hash table */
     330             :         HASHCTL     ctl;
     331             : 
     332        2050 :         MemSet(&ctl, 0, sizeof(ctl));
     333        2050 :         ctl.keysize = sizeof(Oid);
     334        2050 :         ctl.entrysize = sizeof(TypeCacheEntry);
     335        2050 :         TypeCacheHash = hash_create("Type information cache", 64,
     336             :                                     &ctl, HASH_ELEM | HASH_BLOBS);
     337             : 
     338             :         /* Also set up callbacks for SI invalidations */
     339        2050 :         CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
     340        2050 :         CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
     341        2050 :         CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);
     342        2050 :         CacheRegisterSyscacheCallback(TYPEOID, TypeCacheConstrCallback, (Datum) 0);
     343             : 
     344             :         /* Also make sure CacheMemoryContext exists */
     345        2050 :         if (!CacheMemoryContext)
     346           0 :             CreateCacheMemoryContext();
     347             :     }
     348             : 
     349             :     /* Try to look up an existing entry */
     350      824622 :     typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
     351             :                                               (void *) &type_id,
     352             :                                               HASH_FIND, NULL);
     353      824622 :     if (typentry == NULL)
     354             :     {
     355             :         /*
     356             :          * If we didn't find one, we want to make one.  But first look up the
     357             :          * pg_type row, just to make sure we don't make a cache entry for an
     358             :          * invalid type OID.  If the type OID is not valid, present a
     359             :          * user-facing error, since some code paths such as domain_in() allow
     360             :          * this function to be reached with a user-supplied OID.
     361             :          */
     362             :         HeapTuple   tp;
     363             :         Form_pg_type typtup;
     364             : 
     365       20638 :         tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
     366       20638 :         if (!HeapTupleIsValid(tp))
     367           0 :             ereport(ERROR,
     368             :                     (errcode(ERRCODE_UNDEFINED_OBJECT),
     369             :                      errmsg("type with OID %u does not exist", type_id)));
     370       20638 :         typtup = (Form_pg_type) GETSTRUCT(tp);
     371       20638 :         if (!typtup->typisdefined)
     372           0 :             ereport(ERROR,
     373             :                     (errcode(ERRCODE_UNDEFINED_OBJECT),
     374             :                      errmsg("type \"%s\" is only a shell",
     375             :                             NameStr(typtup->typname))));
     376             : 
     377             :         /* Now make the typcache entry */
     378       20638 :         typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
     379             :                                                   (void *) &type_id,
     380             :                                                   HASH_ENTER, &found);
     381             :         Assert(!found);         /* it wasn't there a moment ago */
     382             : 
     383       20638 :         MemSet(typentry, 0, sizeof(TypeCacheEntry));
     384       20638 :         typentry->type_id = type_id;
     385       20638 :         typentry->typlen = typtup->typlen;
     386       20638 :         typentry->typbyval = typtup->typbyval;
     387       20638 :         typentry->typalign = typtup->typalign;
     388       20638 :         typentry->typstorage = typtup->typstorage;
     389       20638 :         typentry->typtype = typtup->typtype;
     390       20638 :         typentry->typrelid = typtup->typrelid;
     391       20638 :         typentry->typelem = typtup->typelem;
     392       20638 :         typentry->typcollation = typtup->typcollation;
     393             : 
     394             :         /* If it's a domain, immediately thread it into the domain cache list */
     395       20638 :         if (typentry->typtype == TYPTYPE_DOMAIN)
     396             :         {
     397        1988 :             typentry->nextDomain = firstDomainTypeEntry;
     398        1988 :             firstDomainTypeEntry = typentry;
     399             :         }
     400             : 
     401       20638 :         ReleaseSysCache(tp);
     402             :     }
     403             : 
     404             :     /*
     405             :      * Look up opclasses if we haven't already and any dependent info is
     406             :      * requested.
     407             :      */
     408      824622 :     if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR | TYPECACHE_GT_OPR |
     409             :                   TYPECACHE_CMP_PROC |
     410             :                   TYPECACHE_EQ_OPR_FINFO | TYPECACHE_CMP_PROC_FINFO |
     411      599274 :                   TYPECACHE_BTREE_OPFAMILY)) &&
     412      599274 :         !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
     413             :     {
     414             :         Oid         opclass;
     415             : 
     416       17990 :         opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
     417       17990 :         if (OidIsValid(opclass))
     418             :         {
     419       16874 :             typentry->btree_opf = get_opclass_family(opclass);
     420       16874 :             typentry->btree_opintype = get_opclass_input_type(opclass);
     421             :         }
     422             :         else
     423             :         {
     424        1116 :             typentry->btree_opf = typentry->btree_opintype = InvalidOid;
     425             :         }
     426             : 
     427             :         /*
     428             :          * Reset information derived from btree opclass.  Note in particular
     429             :          * that we'll redetermine the eq_opr even if we previously found one;
     430             :          * this matters in case a btree opclass has been added to a type that
     431             :          * previously had only a hash opclass.
     432             :          */
     433       17990 :         typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
     434             :                              TCFLAGS_CHECKED_LT_OPR |
     435             :                              TCFLAGS_CHECKED_GT_OPR |
     436             :                              TCFLAGS_CHECKED_CMP_PROC);
     437       17990 :         typentry->flags |= TCFLAGS_CHECKED_BTREE_OPCLASS;
     438             :     }
     439             : 
     440             :     /*
     441             :      * If we need to look up equality operator, and there's no btree opclass,
     442             :      * force lookup of hash opclass.
     443             :      */
     444     1412984 :     if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
     445      606232 :         !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
     446       17870 :         typentry->btree_opf == InvalidOid)
     447        1112 :         flags |= TYPECACHE_HASH_OPFAMILY;
     448             : 
     449      824622 :     if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO |
     450             :                   TYPECACHE_HASH_EXTENDED_PROC |
     451             :                   TYPECACHE_HASH_EXTENDED_PROC_FINFO |
     452      101362 :                   TYPECACHE_HASH_OPFAMILY)) &&
     453      101362 :         !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
     454             :     {
     455             :         Oid         opclass;
     456             : 
     457        8330 :         opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
     458        8330 :         if (OidIsValid(opclass))
     459             :         {
     460        8144 :             typentry->hash_opf = get_opclass_family(opclass);
     461        8144 :             typentry->hash_opintype = get_opclass_input_type(opclass);
     462             :         }
     463             :         else
     464             :         {
     465         186 :             typentry->hash_opf = typentry->hash_opintype = InvalidOid;
     466             :         }
     467             : 
     468             :         /*
     469             :          * Reset information derived from hash opclass.  We do *not* reset the
     470             :          * eq_opr; if we already found one from the btree opclass, that
     471             :          * decision is still good.
     472             :          */
     473        8330 :         typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
     474             :                              TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
     475        8330 :         typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
     476             :     }
     477             : 
     478             :     /*
     479             :      * Look for requested operators and functions, if we haven't already.
     480             :      */
     481     1412984 :     if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
     482      588362 :         !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
     483             :     {
     484       17870 :         Oid         eq_opr = InvalidOid;
     485             : 
     486       17870 :         if (typentry->btree_opf != InvalidOid)
     487       16758 :             eq_opr = get_opfamily_member(typentry->btree_opf,
     488             :                                          typentry->btree_opintype,
     489             :                                          typentry->btree_opintype,
     490             :                                          BTEqualStrategyNumber);
     491       18982 :         if (eq_opr == InvalidOid &&
     492        1112 :             typentry->hash_opf != InvalidOid)
     493         994 :             eq_opr = get_opfamily_member(typentry->hash_opf,
     494             :                                          typentry->hash_opintype,
     495             :                                          typentry->hash_opintype,
     496             :                                          HTEqualStrategyNumber);
     497             : 
     498             :         /*
     499             :          * If the proposed equality operator is array_eq or record_eq, check
     500             :          * to see if the element type or column types support equality.  If
     501             :          * not, array_eq or record_eq would fail at runtime, so we don't want
     502             :          * to report that the type has equality.  (We can omit similar
     503             :          * checking for ranges because ranges can't be created in the first
     504             :          * place unless their subtypes support equality.)
     505             :          */
     506       20848 :         if (eq_opr == ARRAY_EQ_OP &&
     507        2978 :             !array_element_has_equality(typentry))
     508         408 :             eq_opr = InvalidOid;
     509       17562 :         else if (eq_opr == RECORD_EQ_OP &&
     510         100 :                  !record_fields_have_equality(typentry))
     511           8 :             eq_opr = InvalidOid;
     512             : 
     513             :         /* Force update of eq_opr_finfo only if we're changing state */
     514       17870 :         if (typentry->eq_opr != eq_opr)
     515       16968 :             typentry->eq_opr_finfo.fn_oid = InvalidOid;
     516             : 
     517       17870 :         typentry->eq_opr = eq_opr;
     518             : 
     519             :         /*
     520             :          * Reset info about hash functions whenever we pick up new info about
     521             :          * equality operator.  This is so we can ensure that the hash
     522             :          * functions match the operator.
     523             :          */
     524       17870 :         typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
     525             :                              TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
     526       17870 :         typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
     527             :     }
     528     1159848 :     if ((flags & TYPECACHE_LT_OPR) &&
     529      335226 :         !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
     530             :     {
     531       17062 :         Oid         lt_opr = InvalidOid;
     532             : 
     533       17062 :         if (typentry->btree_opf != InvalidOid)
     534       16202 :             lt_opr = get_opfamily_member(typentry->btree_opf,
     535             :                                          typentry->btree_opintype,
     536             :                                          typentry->btree_opintype,
     537             :                                          BTLessStrategyNumber);
     538             : 
     539             :         /*
     540             :          * As above, make sure array_cmp or record_cmp will succeed; but again
     541             :          * we need no special check for ranges.
     542             :          */
     543       20030 :         if (lt_opr == ARRAY_LT_OP &&
     544        2968 :             !array_element_has_compare(typentry))
     545         824 :             lt_opr = InvalidOid;
     546       16316 :         else if (lt_opr == RECORD_LT_OP &&
     547          78 :                  !record_fields_have_compare(typentry))
     548           8 :             lt_opr = InvalidOid;
     549             : 
     550       17062 :         typentry->lt_opr = lt_opr;
     551       17062 :         typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
     552             :     }
     553     1158474 :     if ((flags & TYPECACHE_GT_OPR) &&
     554      333852 :         !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
     555             :     {
     556       17038 :         Oid         gt_opr = InvalidOid;
     557             : 
     558       17038 :         if (typentry->btree_opf != InvalidOid)
     559       16178 :             gt_opr = get_opfamily_member(typentry->btree_opf,
     560             :                                          typentry->btree_opintype,
     561             :                                          typentry->btree_opintype,
     562             :                                          BTGreaterStrategyNumber);
     563             : 
     564             :         /*
     565             :          * As above, make sure array_cmp or record_cmp will succeed; but again
     566             :          * we need no special check for ranges.
     567             :          */
     568       20000 :         if (gt_opr == ARRAY_GT_OP &&
     569        2962 :             !array_element_has_compare(typentry))
     570         824 :             gt_opr = InvalidOid;
     571       16292 :         else if (gt_opr == RECORD_GT_OP &&
     572          78 :                  !record_fields_have_compare(typentry))
     573           8 :             gt_opr = InvalidOid;
     574             : 
     575       17038 :         typentry->gt_opr = gt_opr;
     576       17038 :         typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
     577             :     }
     578      850366 :     if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
     579       25744 :         !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
     580             :     {
     581        2436 :         Oid         cmp_proc = InvalidOid;
     582             : 
     583        2436 :         if (typentry->btree_opf != InvalidOid)
     584        2012 :             cmp_proc = get_opfamily_proc(typentry->btree_opf,
     585             :                                          typentry->btree_opintype,
     586             :                                          typentry->btree_opintype,
     587             :                                          BTORDER_PROC);
     588             : 
     589             :         /*
     590             :          * As above, make sure array_cmp or record_cmp will succeed; but again
     591             :          * we need no special check for ranges.
     592             :          */
     593        2488 :         if (cmp_proc == F_BTARRAYCMP &&
     594          52 :             !array_element_has_compare(typentry))
     595           0 :             cmp_proc = InvalidOid;
     596        2474 :         else if (cmp_proc == F_BTRECORDCMP &&
     597          38 :                  !record_fields_have_compare(typentry))
     598           0 :             cmp_proc = InvalidOid;
     599             : 
     600             :         /* Force update of cmp_proc_finfo only if we're changing state */
     601        2436 :         if (typentry->cmp_proc != cmp_proc)
     602        2006 :             typentry->cmp_proc_finfo.fn_oid = InvalidOid;
     603             : 
     604        2436 :         typentry->cmp_proc = cmp_proc;
     605        2436 :         typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
     606             :     }
     607      925268 :     if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) &&
     608      100646 :         !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
     609             :     {
     610        7642 :         Oid         hash_proc = InvalidOid;
     611             : 
     612             :         /*
     613             :          * We insist that the eq_opr, if one has been determined, match the
     614             :          * hash opclass; else report there is no hash function.
     615             :          */
     616       15216 :         if (typentry->hash_opf != InvalidOid &&
     617       15136 :             (!OidIsValid(typentry->eq_opr) ||
     618        7562 :              typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
     619             :                                                      typentry->hash_opintype,
     620             :                                                      typentry->hash_opintype,
     621             :                                                      HTEqualStrategyNumber)))
     622        7574 :             hash_proc = get_opfamily_proc(typentry->hash_opf,
     623             :                                           typentry->hash_opintype,
     624             :                                           typentry->hash_opintype,
     625             :                                           HASHSTANDARD_PROC);
     626             : 
     627             :         /*
     628             :          * As above, make sure hash_array will succeed.  We don't currently
     629             :          * support hashing for composite types, but when we do, we'll need
     630             :          * more logic here to check that case too.
     631             :          */
     632        7666 :         if (hash_proc == F_HASH_ARRAY &&
     633          24 :             !array_element_has_hashing(typentry))
     634           4 :             hash_proc = InvalidOid;
     635             : 
     636             :         /*
     637             :          * Likewise for hash_range.
     638             :          */
     639        7654 :         if (hash_proc == F_HASH_RANGE &&
     640          12 :             !range_element_has_hashing(typentry))
     641           4 :             hash_proc = InvalidOid;
     642             : 
     643             :         /* Force update of hash_proc_finfo only if we're changing state */
     644        7642 :         if (typentry->hash_proc != hash_proc)
     645        7290 :             typentry->hash_proc_finfo.fn_oid = InvalidOid;
     646             : 
     647        7642 :         typentry->hash_proc = hash_proc;
     648        7642 :         typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
     649             :     }
     650      824622 :     if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
     651        2646 :                   TYPECACHE_HASH_EXTENDED_PROC_FINFO)) &&
     652        2646 :         !(typentry->flags & TCFLAGS_CHECKED_HASH_EXTENDED_PROC))
     653             :     {
     654        2214 :         Oid         hash_extended_proc = InvalidOid;
     655             : 
     656             :         /*
     657             :          * We insist that the eq_opr, if one has been determined, match the
     658             :          * hash opclass; else report there is no hash function.
     659             :          */
     660        4412 :         if (typentry->hash_opf != InvalidOid &&
     661        4384 :             (!OidIsValid(typentry->eq_opr) ||
     662        2186 :              typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
     663             :                                                      typentry->hash_opintype,
     664             :                                                      typentry->hash_opintype,
     665             :                                                      HTEqualStrategyNumber)))
     666        2198 :             hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
     667             :                                                    typentry->hash_opintype,
     668             :                                                    typentry->hash_opintype,
     669             :                                                    HASHEXTENDED_PROC);
     670             : 
     671             :         /*
     672             :          * As above, make sure hash_array_extended will succeed.  We don't
     673             :          * currently support hashing for composite types, but when we do,
     674             :          * we'll need more logic here to check that case too.
     675             :          */
     676        2214 :         if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
     677           0 :             !array_element_has_extended_hashing(typentry))
     678           0 :             hash_extended_proc = InvalidOid;
     679             : 
     680             :         /*
     681             :          * Likewise for hash_range_extended.
     682             :          */
     683        2214 :         if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
     684           0 :             !range_element_has_extended_hashing(typentry))
     685           0 :             hash_extended_proc = InvalidOid;
     686             : 
     687             :         /* Force update of proc finfo only if we're changing state */
     688        2214 :         if (typentry->hash_extended_proc != hash_extended_proc)
     689        2192 :             typentry->hash_extended_proc_finfo.fn_oid = InvalidOid;
     690             : 
     691        2214 :         typentry->hash_extended_proc = hash_extended_proc;
     692        2214 :         typentry->flags |= TCFLAGS_CHECKED_HASH_EXTENDED_PROC;
     693             :     }
     694             : 
     695             :     /*
     696             :      * Set up fmgr lookup info as requested
     697             :      *
     698             :      * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
     699             :      * which is not quite right (they're really in the hash table's private
     700             :      * memory context) but this will do for our purposes.
     701             :      *
     702             :      * Note: the code above avoids invalidating the finfo structs unless the
     703             :      * referenced operator/function OID actually changes.  This is to prevent
     704             :      * unnecessary leakage of any subsidiary data attached to an finfo, since
     705             :      * that would cause session-lifespan memory leaks.
     706             :      */
     707     1049780 :     if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
     708      226106 :         typentry->eq_opr_finfo.fn_oid == InvalidOid &&
     709         948 :         typentry->eq_opr != InvalidOid)
     710             :     {
     711             :         Oid         eq_opr_func;
     712             : 
     713         944 :         eq_opr_func = get_opcode(typentry->eq_opr);
     714         944 :         if (eq_opr_func != InvalidOid)
     715         944 :             fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
     716             :                           CacheMemoryContext);
     717             :     }
     718      845214 :     if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
     719       28052 :         typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
     720        7460 :         typentry->cmp_proc != InvalidOid)
     721             :     {
     722        1838 :         fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
     723             :                       CacheMemoryContext);
     724             :     }
     725      840880 :     if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
     726       18382 :         typentry->hash_proc_finfo.fn_oid == InvalidOid &&
     727        2124 :         typentry->hash_proc != InvalidOid)
     728             :     {
     729        2124 :         fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
     730             :                       CacheMemoryContext);
     731             :     }
     732      824650 :     if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) &&
     733          36 :         typentry->hash_extended_proc_finfo.fn_oid == InvalidOid &&
     734           8 :         typentry->hash_extended_proc != InvalidOid)
     735             :     {
     736           8 :         fmgr_info_cxt(typentry->hash_extended_proc,
     737             :                       &typentry->hash_extended_proc_finfo,
     738             :                       CacheMemoryContext);
     739             :     }
     740             : 
     741             :     /*
     742             :      * If it's a composite type (row type), get tupdesc if requested
     743             :      */
     744      879756 :     if ((flags & TYPECACHE_TUPDESC) &&
     745       57576 :         typentry->tupDesc == NULL &&
     746        2442 :         typentry->typtype == TYPTYPE_COMPOSITE)
     747             :     {
     748        2328 :         load_typcache_tupdesc(typentry);
     749             :     }
     750             : 
     751             :     /*
     752             :      * If requested, get information about a range type
     753             :      */
     754      828674 :     if ((flags & TYPECACHE_RANGE_INFO) &&
     755        4196 :         typentry->rngelemtype == NULL &&
     756         144 :         typentry->typtype == TYPTYPE_RANGE)
     757             :     {
     758         144 :         load_rangetype_info(typentry);
     759             :     }
     760             : 
     761             :     /*
     762             :      * If requested, get information about a domain type
     763             :      */
     764      831582 :     if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
     765       10948 :         typentry->domainBaseType == InvalidOid &&
     766        3988 :         typentry->typtype == TYPTYPE_DOMAIN)
     767             :     {
     768         784 :         typentry->domainBaseTypmod = -1;
     769         784 :         typentry->domainBaseType =
     770         784 :             getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
     771             :     }
     772      950556 :     if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
     773      133362 :         (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
     774        7428 :         typentry->typtype == TYPTYPE_DOMAIN)
     775             :     {
     776        5254 :         load_domaintype_info(typentry);
     777             :     }
     778             : 
     779      824622 :     return typentry;
     780             : }
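As the comment above the function warns, a non-NULL result does not mean every requested field was found; callers test the individual fields. A minimal caller-side sketch (the helper is invented for illustration):

    /* Illustrative only: does this type have a default btree sort ordering? */
    static bool
    type_is_sortable(Oid typid)
    {
        TypeCacheEntry *typentry;

        typentry = lookup_type_cache(typid, TYPECACHE_LT_OPR | TYPECACHE_CMP_PROC);

        /* lookup_type_cache never returns NULL, but the fields may be unset. */
        return OidIsValid(typentry->lt_opr) && OidIsValid(typentry->cmp_proc);
    }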
     781             : 
     782             : /*
     783             :  * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
     784             :  */
     785             : static void
     786        2370 : load_typcache_tupdesc(TypeCacheEntry *typentry)
     787             : {
     788             :     Relation    rel;
     789             : 
     790        2370 :     if (!OidIsValid(typentry->typrelid)) /* should not happen */
     791           0 :         elog(ERROR, "invalid typrelid for composite type %u",
     792             :              typentry->type_id);
     793        2370 :     rel = relation_open(typentry->typrelid, AccessShareLock);
     794             :     Assert(rel->rd_rel->reltype == typentry->type_id);
     795             : 
     796             :     /*
     797             :      * Link to the tupdesc and increment its refcount (we assert it's a
     798             :      * refcounted descriptor).  We don't use IncrTupleDescRefCount() for this,
     799             :      * because the reference mustn't be entered in the current resource owner;
     800             :      * it can outlive the current query.
     801             :      */
     802        2370 :     typentry->tupDesc = RelationGetDescr(rel);
     803             : 
     804             :     Assert(typentry->tupDesc->tdrefcount > 0);
     805        2370 :     typentry->tupDesc->tdrefcount++;
     806             : 
     807             :     /*
     808             :      * In future, we could take some pains to not change tupDesc_identifier if
     809             :      * the tupdesc didn't really change; but for now it's not worth it.
     810             :      */
     811        2370 :     typentry->tupDesc_identifier = ++tupledesc_id_counter;
     812             : 
     813        2370 :     relation_close(rel, AccessShareLock);
     814        2370 : }
     815             : 
     816             : /*
     817             :  * load_rangetype_info --- helper routine to set up range type information
     818             :  */
     819             : static void
     820         144 : load_rangetype_info(TypeCacheEntry *typentry)
     821             : {
     822             :     Form_pg_range pg_range;
     823             :     HeapTuple   tup;
     824             :     Oid         subtypeOid;
     825             :     Oid         opclassOid;
     826             :     Oid         canonicalOid;
     827             :     Oid         subdiffOid;
     828             :     Oid         opfamilyOid;
     829             :     Oid         opcintype;
     830             :     Oid         cmpFnOid;
     831             : 
     832             :     /* get information from pg_range */
     833         144 :     tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id));
     834             :     /* should not fail, since we already checked typtype ... */
     835         144 :     if (!HeapTupleIsValid(tup))
     836           0 :         elog(ERROR, "cache lookup failed for range type %u",
     837             :              typentry->type_id);
     838         144 :     pg_range = (Form_pg_range) GETSTRUCT(tup);
     839             : 
     840         144 :     subtypeOid = pg_range->rngsubtype;
     841         144 :     typentry->rng_collation = pg_range->rngcollation;
     842         144 :     opclassOid = pg_range->rngsubopc;
     843         144 :     canonicalOid = pg_range->rngcanonical;
     844         144 :     subdiffOid = pg_range->rngsubdiff;
     845             : 
     846         144 :     ReleaseSysCache(tup);
     847             : 
     848             :     /* get opclass properties and look up the comparison function */
     849         144 :     opfamilyOid = get_opclass_family(opclassOid);
     850         144 :     opcintype = get_opclass_input_type(opclassOid);
     851             : 
     852         144 :     cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
     853             :                                  BTORDER_PROC);
     854         144 :     if (!RegProcedureIsValid(cmpFnOid))
     855           0 :         elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
     856             :              BTORDER_PROC, opcintype, opcintype, opfamilyOid);
     857             : 
     858             :     /* set up cached fmgrinfo structs */
     859         144 :     fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
     860             :                   CacheMemoryContext);
     861         144 :     if (OidIsValid(canonicalOid))
     862          70 :         fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
     863             :                       CacheMemoryContext);
     864         144 :     if (OidIsValid(subdiffOid))
     865         112 :         fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
     866             :                       CacheMemoryContext);
     867             : 
     868             :     /* Lastly, set up link to the element type --- this marks data valid */
     869         144 :     typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
     870         144 : }
     871             : 
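With rng_cmp_proc_finfo and rng_collation cached, range code can compare two bound values of the subtype without repeating any catalog lookups. A hedged sketch of such a comparison (the wrapper name is illustrative):

    /* Illustrative only: compare two bound values of the cached range subtype. */
    static int
    range_bound_datum_cmp(TypeCacheEntry *typcache, Datum a, Datum b)
    {
        return DatumGetInt32(FunctionCall2Coll(&typcache->rng_cmp_proc_finfo,
                                               typcache->rng_collation,
                                               a, b));
    }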
     872             : 
     873             : /*
     874             :  * load_domaintype_info --- helper routine to set up domain constraint info
     875             :  *
     876             :  * Note: we assume we're called in a relatively short-lived context, so it's
     877             :  * okay to leak data into the current context while scanning pg_constraint.
     878             :  * We build the new DomainConstraintCache data in a context underneath
     879             :  * CurrentMemoryContext, and reparent it under CacheMemoryContext when
     880             :  * complete.
     881             :  */
     882             : static void
     883        5254 : load_domaintype_info(TypeCacheEntry *typentry)
     884             : {
     885        5254 :     Oid         typeOid = typentry->type_id;
     886             :     DomainConstraintCache *dcc;
     887        5254 :     bool        notNull = false;
     888             :     DomainConstraintState **ccons;
     889             :     int         cconslen;
     890             :     Relation    conRel;
     891             :     MemoryContext oldcxt;
     892             : 
     893             :     /*
     894             :      * If we're here, any existing constraint info is stale, so release it.
     895             :      * For safety, be sure to null the link before trying to delete the data.
     896             :      */
     897        5254 :     if (typentry->domainData)
     898             :     {
     899        1464 :         dcc = typentry->domainData;
     900        1464 :         typentry->domainData = NULL;
     901        1464 :         decr_dcc_refcount(dcc);
     902             :     }
     903             : 
     904             :     /*
     905             :      * We try to optimize the common case of no domain constraints, so don't
     906             :      * create the dcc object and context until we find a constraint.  Likewise
     907             :      * for the temp sorting array.
     908             :      */
     909        5254 :     dcc = NULL;
     910        5254 :     ccons = NULL;
     911        5254 :     cconslen = 0;
     912             : 
     913             :     /*
     914             :      * Scan pg_constraint for relevant constraints.  We want to find
     915             :      * constraints for not just this domain, but any ancestor domains, so the
     916             :      * outer loop crawls up the domain stack.
     917             :      */
     918        5254 :     conRel = table_open(ConstraintRelationId, AccessShareLock);
     919             : 
     920             :     for (;;)
     921        5302 :     {
     922             :         HeapTuple   tup;
     923             :         HeapTuple   conTup;
     924             :         Form_pg_type typTup;
     925       10556 :         int         nccons = 0;
     926             :         ScanKeyData key[1];
     927             :         SysScanDesc scan;
     928             : 
     929       10556 :         tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
     930       10556 :         if (!HeapTupleIsValid(tup))
     931           0 :             elog(ERROR, "cache lookup failed for type %u", typeOid);
     932       10556 :         typTup = (Form_pg_type) GETSTRUCT(tup);
     933             : 
     934       10556 :         if (typTup->typtype != TYPTYPE_DOMAIN)
     935             :         {
     936             :             /* Not a domain, so done */
     937        5254 :             ReleaseSysCache(tup);
     938        5254 :             break;
     939             :         }
     940             : 
      941             :         /* Test for NOT NULL constraint */
     942        5302 :         if (typTup->typnotnull)
     943          94 :             notNull = true;
     944             : 
      945             :         /* Look for CHECK constraints on this domain */
     946        5302 :         ScanKeyInit(&key[0],
     947             :                     Anum_pg_constraint_contypid,
     948             :                     BTEqualStrategyNumber, F_OIDEQ,
     949             :                     ObjectIdGetDatum(typeOid));
     950             : 
     951        5302 :         scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
     952             :                                   NULL, 1, key);
     953             : 
     954       12890 :         while (HeapTupleIsValid(conTup = systable_getnext(scan)))
     955             :         {
     956        2286 :             Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
     957             :             Datum       val;
     958             :             bool        isNull;
     959             :             char       *constring;
     960             :             Expr       *check_expr;
     961             :             DomainConstraintState *r;
     962             : 
     963             :             /* Ignore non-CHECK constraints (presently, shouldn't be any) */
     964        2286 :             if (c->contype != CONSTRAINT_CHECK)
     965           0 :                 continue;
     966             : 
     967             :             /* Not expecting conbin to be NULL, but we'll test for it anyway */
     968        2286 :             val = fastgetattr(conTup, Anum_pg_constraint_conbin,
     969             :                               conRel->rd_att, &isNull);
     970        2286 :             if (isNull)
     971           0 :                 elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
     972             :                      NameStr(typTup->typname), NameStr(c->conname));
     973             : 
     974             :             /* Convert conbin to C string in caller context */
     975        2286 :             constring = TextDatumGetCString(val);
     976             : 
     977             :             /* Create the DomainConstraintCache object and context if needed */
     978        2286 :             if (dcc == NULL)
     979             :             {
     980             :                 MemoryContext cxt;
     981             : 
     982        2256 :                 cxt = AllocSetContextCreate(CurrentMemoryContext,
     983             :                                             "Domain constraints",
     984             :                                             ALLOCSET_SMALL_SIZES);
     985        2256 :                 dcc = (DomainConstraintCache *)
     986             :                     MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
     987        2256 :                 dcc->constraints = NIL;
     988        2256 :                 dcc->dccContext = cxt;
     989        2256 :                 dcc->dccRefCount = 0;
     990             :             }
     991             : 
     992             :             /* Create node trees in DomainConstraintCache's context */
     993        2286 :             oldcxt = MemoryContextSwitchTo(dcc->dccContext);
     994             : 
     995        2286 :             check_expr = (Expr *) stringToNode(constring);
     996             : 
     997             :             /*
     998             :              * Plan the expression, since ExecInitExpr will expect that.
     999             :              *
    1000             :              * Note: caching the result of expression_planner() is not very
    1001             :              * good practice.  Ideally we'd use a CachedExpression here so
    1002             :              * that we would react promptly to, eg, changes in inlined
    1003             :              * functions.  However, because we don't support mutable domain
    1004             :              * CHECK constraints, it's not really clear that it's worth the
    1005             :              * extra overhead to do that.
    1006             :              */
    1007        2286 :             check_expr = expression_planner(check_expr);
    1008             : 
    1009        2286 :             r = makeNode(DomainConstraintState);
    1010        2286 :             r->constrainttype = DOM_CONSTRAINT_CHECK;
    1011        2286 :             r->name = pstrdup(NameStr(c->conname));
    1012        2286 :             r->check_expr = check_expr;
    1013        2286 :             r->check_exprstate = NULL;
    1014             : 
    1015        2286 :             MemoryContextSwitchTo(oldcxt);
    1016             : 
    1017             :             /* Accumulate constraints in an array, for sorting below */
    1018        2286 :             if (ccons == NULL)
    1019             :             {
    1020        2256 :                 cconslen = 8;
    1021        2256 :                 ccons = (DomainConstraintState **)
    1022        2256 :                     palloc(cconslen * sizeof(DomainConstraintState *));
    1023             :             }
    1024          30 :             else if (nccons >= cconslen)
    1025             :             {
    1026           0 :                 cconslen *= 2;
    1027           0 :                 ccons = (DomainConstraintState **)
    1028           0 :                     repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
    1029             :             }
    1030        2286 :             ccons[nccons++] = r;
    1031             :         }
    1032             : 
    1033        5302 :         systable_endscan(scan);
    1034             : 
    1035        5302 :         if (nccons > 0)
    1036             :         {
    1037             :             /*
    1038             :              * Sort the items for this domain, so that CHECKs are applied in a
    1039             :              * deterministic order.
    1040             :              */
    1041        2278 :             if (nccons > 1)
    1042           6 :                 qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);
    1043             : 
    1044             :             /*
    1045             :              * Now attach them to the overall list.  Use lcons() here because
    1046             :              * constraints of parent domains should be applied earlier.
    1047             :              */
    1048        2278 :             oldcxt = MemoryContextSwitchTo(dcc->dccContext);
    1049        6842 :             while (nccons > 0)
    1050        2286 :                 dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
    1051        2278 :             MemoryContextSwitchTo(oldcxt);
    1052             :         }
    1053             : 
    1054             :         /* loop to next domain in stack */
    1055        5302 :         typeOid = typTup->typbasetype;
    1056        5302 :         ReleaseSysCache(tup);
    1057             :     }
    1058             : 
    1059        5254 :     table_close(conRel, AccessShareLock);
    1060             : 
    1061             :     /*
    1062             :      * Only need to add one NOT NULL check regardless of how many domains in
    1063             :      * the stack request it.
    1064             :      */
    1065        5254 :     if (notNull)
    1066             :     {
    1067             :         DomainConstraintState *r;
    1068             : 
    1069             :         /* Create the DomainConstraintCache object and context if needed */
    1070          94 :         if (dcc == NULL)
    1071             :         {
    1072             :             MemoryContext cxt;
    1073             : 
    1074          62 :             cxt = AllocSetContextCreate(CurrentMemoryContext,
    1075             :                                         "Domain constraints",
    1076             :                                         ALLOCSET_SMALL_SIZES);
    1077          62 :             dcc = (DomainConstraintCache *)
    1078             :                 MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
    1079          62 :             dcc->constraints = NIL;
    1080          62 :             dcc->dccContext = cxt;
    1081          62 :             dcc->dccRefCount = 0;
    1082             :         }
    1083             : 
    1084             :         /* Create node trees in DomainConstraintCache's context */
    1085          94 :         oldcxt = MemoryContextSwitchTo(dcc->dccContext);
    1086             : 
    1087          94 :         r = makeNode(DomainConstraintState);
    1088             : 
    1089          94 :         r->constrainttype = DOM_CONSTRAINT_NOTNULL;
    1090          94 :         r->name = pstrdup("NOT NULL");
    1091          94 :         r->check_expr = NULL;
    1092          94 :         r->check_exprstate = NULL;
    1093             : 
    1094             :         /* lcons to apply the nullness check FIRST */
    1095          94 :         dcc->constraints = lcons(r, dcc->constraints);
    1096             : 
    1097          94 :         MemoryContextSwitchTo(oldcxt);
    1098             :     }
    1099             : 
    1100             :     /*
    1101             :      * If we made a constraint object, move it into CacheMemoryContext and
    1102             :      * attach it to the typcache entry.
    1103             :      */
    1104        5254 :     if (dcc)
    1105             :     {
    1106        2318 :         MemoryContextSetParent(dcc->dccContext, CacheMemoryContext);
    1107        2318 :         typentry->domainData = dcc;
    1108        2318 :         dcc->dccRefCount++;      /* count the typcache's reference */
    1109             :     }
    1110             : 
    1111             :     /* Either way, the typcache entry's domain data is now valid. */
    1112        5254 :     typentry->flags |= TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
    1113        5254 : }
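
/*
 * Editor's illustration (not part of the original source): the
 * build-then-reparent idiom used by load_domaintype_info(), reduced to its
 * skeleton.  "ExampleCache" and example_build_cache() are hypothetical; only
 * AllocSetContextCreate(), MemoryContextAlloc() and MemoryContextSetParent()
 * are the real APIs being demonstrated.
 */
typedef struct ExampleCache
{
    MemoryContext cxt;          /* private context holding all of the data */
    int         nitems;
} ExampleCache;

static ExampleCache *
example_build_cache(void)
{
    MemoryContext cxt;
    ExampleCache *cache;

    /*
     * Build in a context under CurrentMemoryContext, so that an error partway
     * through releases everything automatically.
     */
    cxt = AllocSetContextCreate(CurrentMemoryContext,
                                "example cache data",
                                ALLOCSET_SMALL_SIZES);
    cache = (ExampleCache *) MemoryContextAlloc(cxt, sizeof(ExampleCache));
    cache->cxt = cxt;
    cache->nitems = 0;

    /* ... fill in the cache, allocating everything in cxt ... */

    /* Only once the data is complete, move it under the long-lived parent. */
    MemoryContextSetParent(cxt, CacheMemoryContext);

    return cache;
}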
    1114             : 
    1115             : /*
    1116             :  * qsort comparator to sort DomainConstraintState pointers by name
    1117             :  */
    1118             : static int
    1119           8 : dcs_cmp(const void *a, const void *b)
    1120             : {
    1121           8 :     const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
    1122           8 :     const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
    1123             : 
    1124           8 :     return strcmp((*ca)->name, (*cb)->name);
    1125             : }
    1126             : 
    1127             : /*
    1128             :  * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
    1129             :  * and free it if no references remain
    1130             :  */
    1131             : static void
    1132       30002 : decr_dcc_refcount(DomainConstraintCache *dcc)
    1133             : {
    1134             :     Assert(dcc->dccRefCount > 0);
    1135       30002 :     if (--(dcc->dccRefCount) <= 0)
    1136        1460 :         MemoryContextDelete(dcc->dccContext);
    1137       30002 : }
    1138             : 
    1139             : /*
    1140             :  * Context reset/delete callback for a DomainConstraintRef
    1141             :  */
    1142             : static void
    1143       30278 : dccref_deletion_callback(void *arg)
    1144             : {
    1145       30278 :     DomainConstraintRef *ref = (DomainConstraintRef *) arg;
    1146       30278 :     DomainConstraintCache *dcc = ref->dcc;
    1147             : 
    1148             :     /* Paranoia --- be sure link is nulled before trying to release */
    1149       30278 :     if (dcc)
    1150             :     {
    1151       28538 :         ref->constraints = NIL;
    1152       28538 :         ref->dcc = NULL;
    1153       28538 :         decr_dcc_refcount(dcc);
    1154             :     }
    1155       30278 : }
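
/*
 * Editor's illustration (not part of the original source): the
 * refcount-plus-reset-callback pattern implemented by decr_dcc_refcount() and
 * dccref_deletion_callback(), in generic form.  "ExamplePayload",
 * "ExampleRef" and the example_* functions are hypothetical;
 * MemoryContextRegisterResetCallback() and MemoryContextCallback are the real
 * APIs being demonstrated.
 */
typedef struct ExamplePayload
{
    int         refcount;
} ExamplePayload;

typedef struct ExampleRef
{
    ExamplePayload *payload;
    MemoryContextCallback callback; /* must live as long as the ref does */
} ExampleRef;

static void
example_ref_reset_callback(void *arg)
{
    ExampleRef *ref = (ExampleRef *) arg;
    ExamplePayload *payload = ref->payload;

    /* Null the link before releasing, mirroring the paranoia above */
    if (payload)
    {
        ref->payload = NULL;
        if (--payload->refcount <= 0)
            pfree(payload);
    }
}

static void
example_ref_init(ExampleRef *ref, ExamplePayload *payload,
                 MemoryContext refctx)
{
    /* Register the callback before acquiring the reference, for safety */
    ref->payload = NULL;
    ref->callback.func = example_ref_reset_callback;
    ref->callback.arg = (void *) ref;
    MemoryContextRegisterResetCallback(refctx, &ref->callback);

    ref->payload = payload;
    payload->refcount++;
}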
    1156             : 
    1157             : /*
    1158             :  * prep_domain_constraints --- prepare domain constraints for execution
    1159             :  *
    1160             :  * The expression trees stored in the DomainConstraintCache's list are
    1161             :  * converted to executable expression state trees stored in execctx.
    1162             :  */
    1163             : static List *
    1164        1888 : prep_domain_constraints(List *constraints, MemoryContext execctx)
    1165             : {
    1166        1888 :     List       *result = NIL;
    1167             :     MemoryContext oldcxt;
    1168             :     ListCell   *lc;
    1169             : 
    1170        1888 :     oldcxt = MemoryContextSwitchTo(execctx);
    1171             : 
    1172        3792 :     foreach(lc, constraints)
    1173             :     {
    1174        1904 :         DomainConstraintState *r = (DomainConstraintState *) lfirst(lc);
    1175             :         DomainConstraintState *newr;
    1176             : 
    1177        1904 :         newr = makeNode(DomainConstraintState);
    1178        1904 :         newr->constrainttype = r->constrainttype;
    1179        1904 :         newr->name = r->name;
    1180        1904 :         newr->check_expr = r->check_expr;
    1181        1904 :         newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
    1182             : 
    1183        1904 :         result = lappend(result, newr);
    1184             :     }
    1185             : 
    1186        1888 :     MemoryContextSwitchTo(oldcxt);
    1187             : 
    1188        1888 :     return result;
    1189             : }
    1190             : 
    1191             : /*
    1192             :  * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
    1193             :  *
    1194             :  * Caller must tell us the MemoryContext in which the DomainConstraintRef
    1195             :  * lives.  The ref will be cleaned up when that context is reset/deleted.
    1196             :  *
    1197             :  * Caller must also tell us whether it wants check_exprstate fields to be
    1198             :  * computed in the DomainConstraintState nodes attached to this ref.
    1199             :  * If it doesn't, we need not make a copy of the DomainConstraintState list.
    1200             :  */
    1201             : void
    1202       30306 : InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref,
    1203             :                         MemoryContext refctx, bool need_exprstate)
    1204             : {
    1205             :     /* Look up the typcache entry --- we assume it survives indefinitely */
    1206       30306 :     ref->tcache = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
    1207       30306 :     ref->need_exprstate = need_exprstate;
    1208             :     /* For safety, establish the callback before acquiring a refcount */
    1209       30306 :     ref->refctx = refctx;
    1210       30306 :     ref->dcc = NULL;
    1211       30306 :     ref->callback.func = dccref_deletion_callback;
    1212       30306 :     ref->callback.arg = (void *) ref;
    1213       30306 :     MemoryContextRegisterResetCallback(refctx, &ref->callback);
    1214             :     /* Acquire refcount if there are constraints, and set up exported list */
    1215       30306 :     if (ref->tcache->domainData)
    1216             :     {
    1217       28566 :         ref->dcc = ref->tcache->domainData;
    1218       28566 :         ref->dcc->dccRefCount++;
    1219       28566 :         if (ref->need_exprstate)
    1220        1888 :             ref->constraints = prep_domain_constraints(ref->dcc->constraints,
    1221             :                                                        ref->refctx);
    1222             :         else
    1223       26678 :             ref->constraints = ref->dcc->constraints;
    1224             :     }
    1225             :     else
    1226        1740 :         ref->constraints = NIL;
    1227       30306 : }
    1228             : 
    1229             : /*
    1230             :  * UpdateDomainConstraintRef --- recheck validity of domain constraint info
    1231             :  *
    1232             :  * If the domain's constraint set changed, ref->constraints is updated to
    1233             :  * point at a new list of cached constraints.
    1234             :  *
    1235             :  * In the normal case where nothing happened to the domain, this is cheap
    1236             :  * enough that it's reasonable (and expected) to check before *each* use
    1237             :  * of the constraint info.
    1238             :  */
    1239             : void
    1240     1377064 : UpdateDomainConstraintRef(DomainConstraintRef *ref)
    1241             : {
    1242     1377064 :     TypeCacheEntry *typentry = ref->tcache;
    1243             : 
    1244             :     /* Make sure typcache entry's data is up to date */
    1245     1377064 :     if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
    1246           0 :         typentry->typtype == TYPTYPE_DOMAIN)
    1247           0 :         load_domaintype_info(typentry);
    1248             : 
    1249             :     /* Transfer to ref object if there's new info, adjusting refcounts */
    1250     1377064 :     if (ref->dcc != typentry->domainData)
    1251             :     {
    1252             :         /* Paranoia --- be sure link is nulled before trying to release */
    1253           0 :         DomainConstraintCache *dcc = ref->dcc;
    1254             : 
    1255           0 :         if (dcc)
    1256             :         {
    1257             :             /*
    1258             :              * Note: we just leak the previous list of executable domain
    1259             :              * constraints.  Alternatively, we could keep those in a child
    1260             :              * context of ref->refctx and free that context at this point.
    1261             :              * However, in practice this code path will be taken so seldom
    1262             :              * that the extra bookkeeping for a child context doesn't seem
    1263             :              * worthwhile; we'll just allow a leak for the lifespan of refctx.
    1264             :              */
    1265           0 :             ref->constraints = NIL;
    1266           0 :             ref->dcc = NULL;
    1267           0 :             decr_dcc_refcount(dcc);
    1268             :         }
    1269           0 :         dcc = typentry->domainData;
    1270           0 :         if (dcc)
    1271             :         {
    1272           0 :             ref->dcc = dcc;
    1273           0 :             dcc->dccRefCount++;
    1274           0 :             if (ref->need_exprstate)
    1275           0 :                 ref->constraints = prep_domain_constraints(dcc->constraints,
    1276             :                                                            ref->refctx);
    1277             :             else
    1278           0 :                 ref->constraints = dcc->constraints;
    1279             :         }
    1280             :     }
    1281     1377064 : }
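
/*
 * Editor's illustration (not part of the original source): the expected
 * calling pattern for InitDomainConstraintRef()/UpdateDomainConstraintRef(),
 * loosely modeled on the domain-checking callers elsewhere in the backend.
 * example_setup(), example_check(), the static "example_ref" and the error
 * text are assumptions; CHECK evaluation is only indicated, since it needs an
 * ExprContext with domainValue_datum/domainValue_isNull set up.
 */
static DomainConstraintRef example_ref;

static void
example_setup(Oid domaintype, MemoryContext refctx)
{
    /* need_exprstate = true so check_exprstate gets built for us */
    InitDomainConstraintRef(domaintype, &example_ref, refctx, true);
}

static void
example_check(Datum value, bool isnull)
{
    ListCell   *lc;

    (void) value;               /* only used by the omitted CHECK evaluation */

    /* Cheap revalidation, expected before every use of the list */
    UpdateDomainConstraintRef(&example_ref);

    foreach(lc, example_ref.constraints)
    {
        DomainConstraintState *con = (DomainConstraintState *) lfirst(lc);

        if (con->constrainttype == DOM_CONSTRAINT_NOTNULL && isnull)
            ereport(ERROR,
                    (errcode(ERRCODE_NOT_NULL_VIOLATION),
                     errmsg("domain does not allow null values")));

        /*
         * For DOM_CONSTRAINT_CHECK, evaluate con->check_exprstate against an
         * ExprContext whose domainValue_datum/domainValue_isNull describe
         * "value"; omitted here for brevity.
         */
    }
}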
    1282             : 
    1283             : /*
    1284             :  * DomainHasConstraints --- utility routine to check if a domain has constraints
    1285             :  *
    1286             :  * This is defined to return false, not fail, if type is not a domain.
    1287             :  */
    1288             : bool
    1289       95628 : DomainHasConstraints(Oid type_id)
    1290             : {
    1291             :     TypeCacheEntry *typentry;
    1292             : 
    1293             :     /*
    1294             :      * Note: a side effect is to cause the typcache's domain data to become
    1295             :      * valid.  This is fine since we'll likely need it soon if there is any.
    1296             :      */
    1297       95628 :     typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
    1298             : 
    1299       95628 :     return (typentry->domainData != NULL);
    1300             : }
    1301             : 
    1302             : 
    1303             : /*
    1304             :  * array_element_has_equality and friends are helper routines to check
    1305             :  * whether we should believe that array_eq and related functions will work
    1306             :  * on the given array type or composite type.
    1307             :  *
    1308             :  * The logic above may call these repeatedly on the same type entry, so we
    1309             :  * make use of the typentry->flags field to cache the results once known.
    1310             :  * Also, we assume that we'll probably want all these facts about the type
    1311             :  * if we want any, so we cache them all using only one lookup of the
    1312             :  * component datatype(s).
    1313             :  */
    1314             : 
    1315             : static bool
    1316        2978 : array_element_has_equality(TypeCacheEntry *typentry)
    1317             : {
    1318        2978 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1319        2958 :         cache_array_element_properties(typentry);
    1320        2978 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
    1321             : }
    1322             : 
    1323             : static bool
    1324        5982 : array_element_has_compare(TypeCacheEntry *typentry)
    1325             : {
    1326        5982 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1327          56 :         cache_array_element_properties(typentry);
    1328        5982 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
    1329             : }
    1330             : 
    1331             : static bool
    1332          24 : array_element_has_hashing(TypeCacheEntry *typentry)
    1333             : {
    1334          24 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1335           0 :         cache_array_element_properties(typentry);
    1336          24 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
    1337             : }
    1338             : 
    1339             : static bool
    1340           0 : array_element_has_extended_hashing(TypeCacheEntry *typentry)
    1341             : {
    1342           0 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1343           0 :         cache_array_element_properties(typentry);
    1344           0 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
    1345             : }
    1346             : 
    1347             : static void
    1348        3014 : cache_array_element_properties(TypeCacheEntry *typentry)
    1349             : {
    1350        3014 :     Oid         elem_type = get_base_element_type(typentry->type_id);
    1351             : 
    1352        3014 :     if (OidIsValid(elem_type))
    1353             :     {
    1354             :         TypeCacheEntry *elementry;
    1355             : 
    1356        2606 :         elementry = lookup_type_cache(elem_type,
    1357             :                                       TYPECACHE_EQ_OPR |
    1358             :                                       TYPECACHE_CMP_PROC |
    1359             :                                       TYPECACHE_HASH_PROC |
    1360             :                                       TYPECACHE_HASH_EXTENDED_PROC);
    1361        2606 :         if (OidIsValid(elementry->eq_opr))
    1362        2606 :             typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
    1363        2606 :         if (OidIsValid(elementry->cmp_proc))
    1364        2190 :             typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
    1365        2606 :         if (OidIsValid(elementry->hash_proc))
    1366        2594 :             typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
    1367        2606 :         if (OidIsValid(elementry->hash_extended_proc))
    1368        2594 :             typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
    1369             :     }
    1370        3014 :     typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
    1371        3014 : }
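
/*
 * Editor's illustration (not part of the original source): the caller-side
 * effect of the property caching above.  lookup_type_cache() only exposes an
 * equality operator for an array type when array_element_has_equality()
 * reports that the element type has one, so array_eq()-style callers simply
 * test the result.  example_require_array_eq() and "arraytypid" are
 * hypothetical; the lookup-and-error pattern mirrors the real callers.
 */
static void
example_require_array_eq(Oid arraytypid)
{
    TypeCacheEntry *typentry;

    typentry = lookup_type_cache(arraytypid, TYPECACHE_EQ_OPR_FINFO);
    if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
        ereport(ERROR,
                (errcode(ERRCODE_UNDEFINED_FUNCTION),
                 errmsg("could not identify an equality operator for type %s",
                        format_type_be(arraytypid))));
}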
    1372             : 
    1373             : /*
    1374             :  * Likewise, some helper functions for composite types.
    1375             :  */
    1376             : 
    1377             : static bool
    1378         100 : record_fields_have_equality(TypeCacheEntry *typentry)
    1379             : {
    1380         100 :     if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
    1381          92 :         cache_record_field_properties(typentry);
    1382         100 :     return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
    1383             : }
    1384             : 
    1385             : static bool
    1386         194 : record_fields_have_compare(TypeCacheEntry *typentry)
    1387             : {
    1388         194 :     if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
    1389          26 :         cache_record_field_properties(typentry);
    1390         194 :     return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
    1391             : }
    1392             : 
    1393             : static void
    1394         118 : cache_record_field_properties(TypeCacheEntry *typentry)
    1395             : {
    1396             :     /*
    1397             :      * For type RECORD, we can't really tell what will work, since we don't
    1398             :      * have access here to the specific anonymous type.  Just assume that
     1399             :      * everything will work (we may get a failure at runtime ...)
    1400             :      */
    1401         118 :     if (typentry->type_id == RECORDOID)
    1402          12 :         typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
    1403             :                             TCFLAGS_HAVE_FIELD_COMPARE);
    1404         106 :     else if (typentry->typtype == TYPTYPE_COMPOSITE)
    1405             :     {
    1406             :         TupleDesc   tupdesc;
    1407             :         int         newflags;
    1408             :         int         i;
    1409             : 
    1410             :         /* Fetch composite type's tupdesc if we don't have it already */
    1411         106 :         if (typentry->tupDesc == NULL)
    1412          42 :             load_typcache_tupdesc(typentry);
    1413         106 :         tupdesc = typentry->tupDesc;
    1414             : 
    1415             :         /* Must bump the refcount while we do additional catalog lookups */
    1416         106 :         IncrTupleDescRefCount(tupdesc);
    1417             : 
    1418             :         /* Have each property if all non-dropped fields have the property */
    1419         106 :         newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
    1420             :                     TCFLAGS_HAVE_FIELD_COMPARE);
    1421         342 :         for (i = 0; i < tupdesc->natts; i++)
    1422             :         {
    1423             :             TypeCacheEntry *fieldentry;
    1424         244 :             Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
    1425             : 
    1426         244 :             if (attr->attisdropped)
    1427           6 :                 continue;
    1428             : 
    1429         238 :             fieldentry = lookup_type_cache(attr->atttypid,
    1430             :                                            TYPECACHE_EQ_OPR |
    1431             :                                            TYPECACHE_CMP_PROC);
    1432         238 :             if (!OidIsValid(fieldentry->eq_opr))
    1433           8 :                 newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
    1434         238 :             if (!OidIsValid(fieldentry->cmp_proc))
    1435           8 :                 newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
    1436             : 
    1437             :             /* We can drop out of the loop once we disprove all bits */
    1438         238 :             if (newflags == 0)
    1439           8 :                 break;
    1440             :         }
    1441         106 :         typentry->flags |= newflags;
    1442             : 
    1443         106 :         DecrTupleDescRefCount(tupdesc);
    1444             :     }
    1445           0 :     else if (typentry->typtype == TYPTYPE_DOMAIN)
    1446             :     {
    1447             :         /* If it's domain over composite, copy base type's properties */
    1448             :         TypeCacheEntry *baseentry;
    1449             : 
    1450             :         /* load up basetype info if we didn't already */
    1451           0 :         if (typentry->domainBaseType == InvalidOid)
    1452             :         {
    1453           0 :             typentry->domainBaseTypmod = -1;
    1454           0 :             typentry->domainBaseType =
    1455           0 :                 getBaseTypeAndTypmod(typentry->type_id,
    1456             :                                      &typentry->domainBaseTypmod);
    1457             :         }
    1458           0 :         baseentry = lookup_type_cache(typentry->domainBaseType,
    1459             :                                       TYPECACHE_EQ_OPR |
    1460             :                                       TYPECACHE_CMP_PROC);
    1461           0 :         if (baseentry->typtype == TYPTYPE_COMPOSITE)
    1462             :         {
    1463           0 :             typentry->flags |= TCFLAGS_DOMAIN_BASE_IS_COMPOSITE;
    1464           0 :             typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
    1465             :                                                    TCFLAGS_HAVE_FIELD_COMPARE);
    1466             :         }
    1467             :     }
    1468         118 :     typentry->flags |= TCFLAGS_CHECKED_FIELD_PROPERTIES;
    1469         118 : }
    1470             : 
    1471             : /*
    1472             :  * Likewise, some helper functions for range types.
    1473             :  *
    1474             :  * We can borrow the flag bits for array element properties to use for range
    1475             :  * element properties, since those flag bits otherwise have no use in a
    1476             :  * range type's typcache entry.
    1477             :  */
    1478             : 
    1479             : static bool
    1480          12 : range_element_has_hashing(TypeCacheEntry *typentry)
    1481             : {
    1482          12 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1483          12 :         cache_range_element_properties(typentry);
    1484          12 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
    1485             : }
    1486             : 
    1487             : static bool
    1488           0 : range_element_has_extended_hashing(TypeCacheEntry *typentry)
    1489             : {
    1490           0 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1491           0 :         cache_range_element_properties(typentry);
    1492           0 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
    1493             : }
    1494             : 
    1495             : static void
    1496          12 : cache_range_element_properties(TypeCacheEntry *typentry)
    1497             : {
    1498             :     /* load up subtype link if we didn't already */
    1499          12 :     if (typentry->rngelemtype == NULL &&
    1500           0 :         typentry->typtype == TYPTYPE_RANGE)
    1501           0 :         load_rangetype_info(typentry);
    1502             : 
    1503          12 :     if (typentry->rngelemtype != NULL)
    1504             :     {
    1505             :         TypeCacheEntry *elementry;
    1506             : 
    1507             :         /* might need to calculate subtype's hash function properties */
    1508          12 :         elementry = lookup_type_cache(typentry->rngelemtype->type_id,
    1509             :                                       TYPECACHE_HASH_PROC |
    1510             :                                       TYPECACHE_HASH_EXTENDED_PROC);
    1511          12 :         if (OidIsValid(elementry->hash_proc))
    1512           8 :             typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
    1513          12 :         if (OidIsValid(elementry->hash_extended_proc))
    1514           8 :             typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
    1515             :     }
    1516          12 :     typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
    1517          12 : }
    1518             : 
    1519             : /*
    1520             :  * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
    1521             :  * to store 'typmod'.
    1522             :  */
    1523             : static void
    1524       11336 : ensure_record_cache_typmod_slot_exists(int32 typmod)
    1525             : {
    1526       11336 :     if (RecordCacheArray == NULL)
    1527             :     {
    1528        1382 :         RecordCacheArray = (TupleDesc *)
    1529        1382 :             MemoryContextAllocZero(CacheMemoryContext, 64 * sizeof(TupleDesc));
    1530        1382 :         RecordIdentifierArray = (uint64 *)
    1531        1382 :             MemoryContextAllocZero(CacheMemoryContext, 64 * sizeof(uint64));
    1532        1382 :         RecordCacheArrayLen = 64;
    1533             :     }
    1534             : 
    1535       11336 :     if (typmod >= RecordCacheArrayLen)
    1536             :     {
    1537           0 :         int32       newlen = RecordCacheArrayLen * 2;
    1538             : 
    1539           0 :         while (typmod >= newlen)
    1540           0 :             newlen *= 2;
    1541             : 
    1542           0 :         RecordCacheArray = (TupleDesc *) repalloc(RecordCacheArray,
    1543             :                                                   newlen * sizeof(TupleDesc));
    1544           0 :         memset(RecordCacheArray + RecordCacheArrayLen, 0,
    1545           0 :                (newlen - RecordCacheArrayLen) * sizeof(TupleDesc));
    1546           0 :         RecordIdentifierArray = (uint64 *) repalloc(RecordIdentifierArray,
    1547             :                                                     newlen * sizeof(uint64));
    1548           0 :         memset(RecordIdentifierArray + RecordCacheArrayLen, 0,
    1549           0 :                (newlen - RecordCacheArrayLen) * sizeof(uint64));
    1550           0 :         RecordCacheArrayLen = newlen;
    1551             :     }
    1552       11336 : }
    1553             : 
    1554             : /*
    1555             :  * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
    1556             :  *
    1557             :  * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
    1558             :  * hasn't had its refcount bumped.
    1559             :  */
    1560             : static TupleDesc
    1561       62370 : lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
    1562             : {
    1563       62370 :     if (type_id != RECORDOID)
    1564             :     {
    1565             :         /*
    1566             :          * It's a named composite type, so use the regular typcache.
    1567             :          */
    1568             :         TypeCacheEntry *typentry;
    1569             : 
    1570       44136 :         typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
    1571       44136 :         if (typentry->tupDesc == NULL && !noError)
    1572           0 :             ereport(ERROR,
    1573             :                     (errcode(ERRCODE_WRONG_OBJECT_TYPE),
    1574             :                      errmsg("type %s is not composite",
    1575             :                             format_type_be(type_id))));
    1576       44136 :         return typentry->tupDesc;
    1577             :     }
    1578             :     else
    1579             :     {
    1580             :         /*
    1581             :          * It's a transient record type, so look in our record-type table.
    1582             :          */
    1583       18234 :         if (typmod >= 0)
    1584             :         {
     1585             :             /* Is it already in our local cache? */
    1586       36436 :             if (typmod < RecordCacheArrayLen &&
    1587       18216 :                 RecordCacheArray[typmod] != NULL)
    1588       18200 :                 return RecordCacheArray[typmod];
    1589             : 
    1590             :             /* Are we attached to a shared record typmod registry? */
    1591          20 :             if (CurrentSession->shared_typmod_registry != NULL)
    1592             :             {
    1593             :                 SharedTypmodTableEntry *entry;
    1594             : 
    1595             :                 /* Try to find it in the shared typmod index. */
    1596          20 :                 entry = dshash_find(CurrentSession->shared_typmod_table,
    1597             :                                     &typmod, false);
    1598          20 :                 if (entry != NULL)
    1599             :                 {
    1600             :                     TupleDesc   tupdesc;
    1601             : 
    1602          20 :                     tupdesc = (TupleDesc)
    1603          20 :                         dsa_get_address(CurrentSession->area,
    1604             :                                         entry->shared_tupdesc);
    1605             :                     Assert(typmod == tupdesc->tdtypmod);
    1606             : 
    1607             :                     /* We may need to extend the local RecordCacheArray. */
    1608          20 :                     ensure_record_cache_typmod_slot_exists(typmod);
    1609             : 
    1610             :                     /*
    1611             :                      * Our local array can now point directly to the TupleDesc
    1612             :                      * in shared memory, which is non-reference-counted.
    1613             :                      */
    1614          20 :                     RecordCacheArray[typmod] = tupdesc;
    1615             :                     Assert(tupdesc->tdrefcount == -1);
    1616             : 
    1617             :                     /*
    1618             :                      * We don't share tupdesc identifiers across processes, so
    1619             :                      * assign one locally.
    1620             :                      */
    1621          20 :                     RecordIdentifierArray[typmod] = ++tupledesc_id_counter;
    1622             : 
    1623          20 :                     dshash_release_lock(CurrentSession->shared_typmod_table,
    1624             :                                         entry);
    1625             : 
    1626          20 :                     return RecordCacheArray[typmod];
    1627             :                 }
    1628             :             }
    1629             :         }
    1630             : 
    1631          14 :         if (!noError)
    1632           0 :             ereport(ERROR,
    1633             :                     (errcode(ERRCODE_WRONG_OBJECT_TYPE),
    1634             :                      errmsg("record type has not been registered")));
    1635          14 :         return NULL;
    1636             :     }
    1637             : }
    1638             : 
    1639             : /*
    1640             :  * lookup_rowtype_tupdesc
    1641             :  *
    1642             :  * Given a typeid/typmod that should describe a known composite type,
    1643             :  * return the tuple descriptor for the type.  Will ereport on failure.
    1644             :  * (Use ereport because this is reachable with user-specified OIDs,
    1645             :  * for example from record_in().)
    1646             :  *
    1647             :  * Note: on success, we increment the refcount of the returned TupleDesc,
    1648             :  * and log the reference in CurrentResourceOwner.  Caller should call
    1649             :  * ReleaseTupleDesc or DecrTupleDescRefCount when done using the tupdesc.
    1650             :  */
    1651             : TupleDesc
    1652       40804 : lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
    1653             : {
    1654             :     TupleDesc   tupDesc;
    1655             : 
    1656       40804 :     tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
    1657       40804 :     PinTupleDesc(tupDesc);
    1658       40804 :     return tupDesc;
    1659             : }
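
/*
 * Editor's illustration (not part of the original source): the pin/release
 * protocol described above.  example_rowtype_natts(), "rowtypeid" and
 * "rowtypmod" are hypothetical; lookup_rowtype_tupdesc() and
 * ReleaseTupleDesc() are the real APIs being paired.
 */
static int
example_rowtype_natts(Oid rowtypeid, int32 rowtypmod)
{
    TupleDesc   tupdesc;
    int         natts;

    tupdesc = lookup_rowtype_tupdesc(rowtypeid, rowtypmod);
    natts = tupdesc->natts;

    /* Every successful lookup must be paired with a release */
    ReleaseTupleDesc(tupdesc);

    return natts;
}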
    1660             : 
    1661             : /*
    1662             :  * lookup_rowtype_tupdesc_noerror
    1663             :  *
    1664             :  * As above, but if the type is not a known composite type and noError
    1665             :  * is true, returns NULL instead of ereport'ing.  (Note that if a bogus
    1666             :  * type_id is passed, you'll get an ereport anyway.)
    1667             :  */
    1668             : TupleDesc
    1669           8 : lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
    1670             : {
    1671             :     TupleDesc   tupDesc;
    1672             : 
    1673           8 :     tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
    1674           8 :     if (tupDesc != NULL)
    1675           8 :         PinTupleDesc(tupDesc);
    1676           8 :     return tupDesc;
    1677             : }
    1678             : 
    1679             : /*
    1680             :  * lookup_rowtype_tupdesc_copy
    1681             :  *
    1682             :  * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
    1683             :  * copied into the CurrentMemoryContext and is not reference-counted.
    1684             :  */
    1685             : TupleDesc
    1686       21542 : lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
    1687             : {
    1688             :     TupleDesc   tmp;
    1689             : 
    1690       21542 :     tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
    1691       21542 :     return CreateTupleDescCopyConstr(tmp);
    1692             : }
    1693             : 
    1694             : /*
    1695             :  * lookup_rowtype_tupdesc_domain
    1696             :  *
    1697             :  * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
    1698             :  * a domain over a named composite type; so this is effectively equivalent to
    1699             :  * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
    1700             :  * except for being a tad faster.
    1701             :  *
    1702             :  * Note: the reason we don't fold the look-through-domain behavior into plain
    1703             :  * lookup_rowtype_tupdesc() is that we want callers to know they might be
    1704             :  * dealing with a domain.  Otherwise they might construct a tuple that should
    1705             :  * be of the domain type, but not apply domain constraints.
    1706             :  */
    1707             : TupleDesc
    1708        1132 : lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
    1709             : {
    1710             :     TupleDesc   tupDesc;
    1711             : 
    1712        1132 :     if (type_id != RECORDOID)
    1713             :     {
    1714             :         /*
    1715             :          * Check for domain or named composite type.  We might as well load
    1716             :          * whichever data is needed.
    1717             :          */
    1718             :         TypeCacheEntry *typentry;
    1719             : 
    1720        1116 :         typentry = lookup_type_cache(type_id,
    1721             :                                      TYPECACHE_TUPDESC |
    1722             :                                      TYPECACHE_DOMAIN_BASE_INFO);
    1723        1116 :         if (typentry->typtype == TYPTYPE_DOMAIN)
    1724           8 :             return lookup_rowtype_tupdesc_noerror(typentry->domainBaseType,
    1725             :                                                   typentry->domainBaseTypmod,
    1726             :                                                   noError);
    1727        1108 :         if (typentry->tupDesc == NULL && !noError)
    1728           0 :             ereport(ERROR,
    1729             :                     (errcode(ERRCODE_WRONG_OBJECT_TYPE),
    1730             :                      errmsg("type %s is not composite",
    1731             :                             format_type_be(type_id))));
    1732        1108 :         tupDesc = typentry->tupDesc;
    1733             :     }
    1734             :     else
    1735          16 :         tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
    1736        1124 :     if (tupDesc != NULL)
    1737        1110 :         PinTupleDesc(tupDesc);
    1738        1124 :     return tupDesc;
    1739             : }
    1740             : 
    1741             : /*
    1742             :  * Hash function for the hash table of RecordCacheEntry.
    1743             :  */
    1744             : static uint32
    1745      163228 : record_type_typmod_hash(const void *data, size_t size)
    1746             : {
    1747      163228 :     RecordCacheEntry *entry = (RecordCacheEntry *) data;
    1748             : 
    1749      163228 :     return hashTupleDesc(entry->tupdesc);
    1750             : }
    1751             : 
    1752             : /*
    1753             :  * Match function for the hash table of RecordCacheEntry.
    1754             :  */
    1755             : static int
    1756      156412 : record_type_typmod_compare(const void *a, const void *b, size_t size)
    1757             : {
    1758      156412 :     RecordCacheEntry *left = (RecordCacheEntry *) a;
    1759      156412 :     RecordCacheEntry *right = (RecordCacheEntry *) b;
    1760             : 
    1761      156412 :     return equalTupleDescs(left->tupdesc, right->tupdesc) ? 0 : 1;
    1762             : }
    1763             : 
    1764             : /*
    1765             :  * assign_record_type_typmod
    1766             :  *
    1767             :  * Given a tuple descriptor for a RECORD type, find or create a cache entry
    1768             :  * for the type, and set the tupdesc's tdtypmod field to a value that will
    1769             :  * identify this cache entry to lookup_rowtype_tupdesc.
    1770             :  */
    1771             : void
    1772      163228 : assign_record_type_typmod(TupleDesc tupDesc)
    1773             : {
    1774             :     RecordCacheEntry *recentry;
    1775             :     TupleDesc   entDesc;
    1776             :     bool        found;
    1777             :     MemoryContext oldcxt;
    1778             : 
    1779             :     Assert(tupDesc->tdtypeid == RECORDOID);
    1780             : 
    1781      163228 :     if (RecordCacheHash == NULL)
    1782             :     {
    1783             :         /* First time through: initialize the hash table */
    1784             :         HASHCTL     ctl;
    1785             : 
    1786        1382 :         MemSet(&ctl, 0, sizeof(ctl));
    1787        1382 :         ctl.keysize = sizeof(TupleDesc);    /* just the pointer */
    1788        1382 :         ctl.entrysize = sizeof(RecordCacheEntry);
    1789        1382 :         ctl.hash = record_type_typmod_hash;
    1790        1382 :         ctl.match = record_type_typmod_compare;
    1791        1382 :         RecordCacheHash = hash_create("Record information cache", 64,
    1792             :                                       &ctl,
    1793             :                                       HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);
    1794             : 
    1795             :         /* Also make sure CacheMemoryContext exists */
    1796        1382 :         if (!CacheMemoryContext)
    1797           0 :             CreateCacheMemoryContext();
    1798             :     }
    1799             : 
    1800             :     /* Find or create a hashtable entry for this tuple descriptor */
    1801      163228 :     recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
    1802             :                                                 (void *) &tupDesc,
    1803             :                                                 HASH_ENTER, &found);
    1804      163228 :     if (found && recentry->tupdesc != NULL)
    1805             :     {
    1806      151912 :         tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
    1807      151912 :         return;
    1808             :     }
    1809             : 
    1810             :     /* Not present, so need to manufacture an entry */
    1811       11316 :     recentry->tupdesc = NULL;
    1812       11316 :     oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
    1813             : 
    1814             :     /* Look in the SharedRecordTypmodRegistry, if attached */
    1815       11316 :     entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
    1816       11316 :     if (entDesc == NULL)
    1817             :     {
    1818             :         /* Reference-counted local cache only. */
    1819       11272 :         entDesc = CreateTupleDescCopy(tupDesc);
    1820       11272 :         entDesc->tdrefcount = 1;
    1821       11272 :         entDesc->tdtypmod = NextRecordTypmod++;
    1822             :     }
    1823       11316 :     ensure_record_cache_typmod_slot_exists(entDesc->tdtypmod);
    1824       11316 :     RecordCacheArray[entDesc->tdtypmod] = entDesc;
    1825       11316 :     recentry->tupdesc = entDesc;
    1826             : 
    1827             :     /* Assign a unique tupdesc identifier, too. */
    1828       11316 :     RecordIdentifierArray[entDesc->tdtypmod] = ++tupledesc_id_counter;
    1829             : 
    1830             :     /* Update the caller's tuple descriptor. */
    1831       11316 :     tupDesc->tdtypmod = entDesc->tdtypmod;
    1832             : 
    1833       11316 :     MemoryContextSwitchTo(oldcxt);
    1834             : }
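
/*
 * Editor's illustration (not part of the original source): registering an
 * anonymous record shape so its values can later be resolved through
 * lookup_rowtype_tupdesc(RECORDOID, typmod).  example_register_pair_type()
 * and the column names are hypothetical; in practice most callers reach this
 * code via BlessTupleDesc(), which calls assign_record_type_typmod() when a
 * RECORD tupdesc has no typmod assigned yet.
 */
static int32
example_register_pair_type(void)
{
    TupleDesc   tupdesc;

    /* CreateTemplateTupleDesc gives us tdtypeid = RECORDOID, tdtypmod = -1 */
    tupdesc = CreateTemplateTupleDesc(2);
    TupleDescInitEntry(tupdesc, (AttrNumber) 1, "a", INT4OID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 2, "b", TEXTOID, -1, 0);

    assign_record_type_typmod(tupdesc);

    /* tupdesc->tdtypmod now identifies this shape within this backend */
    return tupdesc->tdtypmod;
}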
    1835             : 
    1836             : /*
    1837             :  * assign_record_type_identifier
    1838             :  *
    1839             :  * Get an identifier, which will be unique over the lifespan of this backend
    1840             :  * process, for the current tuple descriptor of the specified composite type.
    1841             :  * For named composite types, the value is guaranteed to change if the type's
    1842             :  * definition does.  For registered RECORD types, the value will not change
    1843             :  * once assigned, since the registered type won't either.  If an anonymous
    1844             :  * RECORD type is specified, we return a new identifier on each call.
    1845             :  */
    1846             : uint64
    1847        2338 : assign_record_type_identifier(Oid type_id, int32 typmod)
    1848             : {
    1849        2338 :     if (type_id != RECORDOID)
    1850             :     {
    1851             :         /*
    1852             :          * It's a named composite type, so use the regular typcache.
    1853             :          */
    1854             :         TypeCacheEntry *typentry;
    1855             : 
    1856           0 :         typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
    1857           0 :         if (typentry->tupDesc == NULL)
    1858           0 :             ereport(ERROR,
    1859             :                     (errcode(ERRCODE_WRONG_OBJECT_TYPE),
    1860             :                      errmsg("type %s is not composite",
    1861             :                             format_type_be(type_id))));
    1862             :         Assert(typentry->tupDesc_identifier != 0);
    1863           0 :         return typentry->tupDesc_identifier;
    1864             :     }
    1865             :     else
    1866             :     {
    1867             :         /*
    1868             :          * It's a transient record type, so look in our record-type table.
    1869             :          */
    1870        2388 :         if (typmod >= 0 && typmod < RecordCacheArrayLen &&
    1871          50 :             RecordCacheArray[typmod] != NULL)
    1872             :         {
    1873             :             Assert(RecordIdentifierArray[typmod] != 0);
    1874          50 :             return RecordIdentifierArray[typmod];
    1875             :         }
    1876             : 
    1877             :         /* For anonymous or unrecognized record type, generate a new ID */
    1878        2288 :         return ++tupledesc_id_counter;
    1879             :     }
    1880             : }
    1881             : 
    1882             : /*
    1883             :  * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
    1884             :  * This exists only to avoid exposing private innards of
    1885             :  * SharedRecordTypmodRegistry in a header.
    1886             :  */
    1887             : size_t
    1888          44 : SharedRecordTypmodRegistryEstimate(void)
    1889             : {
    1890          44 :     return sizeof(SharedRecordTypmodRegistry);
    1891             : }
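
/*
 * Editor's illustration (not part of the original source): the leader-side
 * pattern for using SharedRecordTypmodRegistryEstimate() and
 * SharedRecordTypmodRegistryInit() together, loosely modeled on the session
 * DSM setup code in src/backend/access/common/session.c.  The "toc", "seg"
 * and "area" arguments are assumed to be prepared by that setup code (and
 * shm_toc requires storage/shm_toc.h); error handling is omitted.
 */
static SharedRecordTypmodRegistry *
example_publish_typmod_registry(shm_toc *toc, dsm_segment *seg, dsa_area *area)
{
    SharedRecordTypmodRegistry *registry;

    /* Reserve exactly the space the registry says it needs ... */
    registry = (SharedRecordTypmodRegistry *)
        shm_toc_allocate(toc, SharedRecordTypmodRegistryEstimate());

    /* ... then initialize it; the leader is attached as a side effect. */
    SharedRecordTypmodRegistryInit(registry, seg, area);

    return registry;
}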
    1892             : 
    1893             : /*
    1894             :  * Initialize 'registry' in a pre-existing shared memory region, which must be
    1895             :  * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
    1896             :  * bytes.
    1897             :  *
    1898             :  * 'area' will be used to allocate shared memory space as required for the
     1899             :  * typmod registration.  The current process, expected to be a leader process
    1900             :  * in a parallel query, will be attached automatically and its current record
    1901             :  * types will be loaded into *registry.  While attached, all calls to
    1902             :  * assign_record_type_typmod will use the shared registry.  Worker backends
    1903             :  * will need to attach explicitly.
    1904             :  *
    1905             :  * Note that this function takes 'area' and 'segment' as arguments rather than
    1906             :  * accessing them via CurrentSession, because they aren't installed there
    1907             :  * until after this function runs.
    1908             :  */
    1909             : void
    1910          44 : SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry,
    1911             :                                dsm_segment *segment,
    1912             :                                dsa_area *area)
    1913             : {
    1914             :     MemoryContext old_context;
    1915             :     dshash_table *record_table;
    1916             :     dshash_table *typmod_table;
    1917             :     int32       typmod;
    1918             : 
    1919             :     Assert(!IsParallelWorker());
    1920             : 
    1921             :     /* We can't already be attached to a shared registry. */
    1922             :     Assert(CurrentSession->shared_typmod_registry == NULL);
    1923             :     Assert(CurrentSession->shared_record_table == NULL);
    1924             :     Assert(CurrentSession->shared_typmod_table == NULL);
    1925             : 
    1926          44 :     old_context = MemoryContextSwitchTo(TopMemoryContext);
    1927             : 
    1928             :     /* Create the hash table of tuple descriptors indexed by themselves. */
    1929          44 :     record_table = dshash_create(area, &srtr_record_table_params, area);
    1930             : 
    1931             :     /* Create the hash table of tuple descriptors indexed by typmod. */
    1932          44 :     typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);
    1933             : 
    1934          44 :     MemoryContextSwitchTo(old_context);
    1935             : 
    1936             :     /* Initialize the SharedRecordTypmodRegistry. */
    1937          44 :     registry->record_table_handle = dshash_get_hash_table_handle(record_table);
    1938          44 :     registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
    1939          44 :     pg_atomic_init_u32(&registry->next_typmod, NextRecordTypmod);
    1940             : 
    1941             :     /*
    1942             :      * Copy all entries from this backend's private registry into the shared
    1943             :      * registry.
    1944             :      */
    1945          44 :     for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
    1946             :     {
    1947             :         SharedTypmodTableEntry *typmod_table_entry;
    1948             :         SharedRecordTableEntry *record_table_entry;
    1949             :         SharedRecordTableKey record_table_key;
    1950             :         dsa_pointer shared_dp;
    1951             :         TupleDesc   tupdesc;
    1952             :         bool        found;
    1953             : 
    1954           0 :         tupdesc = RecordCacheArray[typmod];
    1955           0 :         if (tupdesc == NULL)
    1956           0 :             continue;
    1957             : 
    1958             :         /* Copy the TupleDesc into shared memory. */
    1959           0 :         shared_dp = share_tupledesc(area, tupdesc, typmod);
    1960             : 
    1961             :         /* Insert into the typmod table. */
    1962           0 :         typmod_table_entry = dshash_find_or_insert(typmod_table,
    1963           0 :                                                    &tupdesc->tdtypmod,
    1964             :                                                    &found);
    1965           0 :         if (found)
    1966           0 :             elog(ERROR, "cannot create duplicate shared record typmod");
    1967           0 :         typmod_table_entry->typmod = tupdesc->tdtypmod;
    1968           0 :         typmod_table_entry->shared_tupdesc = shared_dp;
    1969           0 :         dshash_release_lock(typmod_table, typmod_table_entry);
    1970             : 
    1971             :         /* Insert into the record table. */
    1972           0 :         record_table_key.shared = false;
    1973           0 :         record_table_key.u.local_tupdesc = tupdesc;
    1974           0 :         record_table_entry = dshash_find_or_insert(record_table,
    1975             :                                                    &record_table_key,
    1976             :                                                    &found);
    1977           0 :         if (!found)
    1978             :         {
    1979           0 :             record_table_entry->key.shared = true;
    1980           0 :             record_table_entry->key.u.shared_tupdesc = shared_dp;
    1981             :         }
    1982           0 :         dshash_release_lock(record_table, record_table_entry);
    1983             :     }
    1984             : 
    1985             :     /*
    1986             :      * Set up the global state that will tell assign_record_type_typmod and
    1987             :      * lookup_rowtype_tupdesc_internal about the shared registry.
    1988             :      */
    1989          44 :     CurrentSession->shared_record_table = record_table;
    1990          44 :     CurrentSession->shared_typmod_table = typmod_table;
    1991          44 :     CurrentSession->shared_typmod_registry = registry;
    1992             : 
    1993             :     /*
    1994             :      * We install a detach hook in the leader, but only to handle cleanup on
    1995             :      * failure during GetSessionDsmHandle().  Once GetSessionDsmHandle() pins
    1996             :      * the memory, the leader process will use a shared registry until it
    1997             :      * exits.
    1998             :      */
    1999          44 :     on_dsm_detach(segment, shared_record_typmod_registry_detach, (Datum) 0);
    2000          44 : }
    2001             : 
    2002             : /*
    2003             :  * Attach to 'registry', which must have been initialized already by another
    2004             :  * backend.  Future calls to assign_record_type_typmod and
    2005             :  * lookup_rowtype_tupdesc_internal will use the shared registry until the
    2006             :  * current session is detached.
    2007             :  */
    2008             : void
    2009        1616 : SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
    2010             : {
    2011             :     MemoryContext old_context;
    2012             :     dshash_table *record_table;
    2013             :     dshash_table *typmod_table;
    2014             : 
    2015             :     Assert(IsParallelWorker());
    2016             : 
    2017             :     /* We can't already be attached to a shared registry. */
    2018             :     Assert(CurrentSession != NULL);
    2019             :     Assert(CurrentSession->segment != NULL);
    2020             :     Assert(CurrentSession->area != NULL);
    2021             :     Assert(CurrentSession->shared_typmod_registry == NULL);
    2022             :     Assert(CurrentSession->shared_record_table == NULL);
    2023             :     Assert(CurrentSession->shared_typmod_table == NULL);
    2024             : 
    2025             :     /*
    2026             :      * We can't already have typmods in our local cache, because they'd clash
    2027             :      * with those imported by SharedRecordTypmodRegistryInit.  This should be
    2028             :      * a freshly started parallel worker.  If we ever support worker
    2029             :      * recycling, a worker would need to zap its local cache in between
    2030             :      * servicing different queries, in order to be able to call this and
    2031             :      * synchronize typmods with a new leader; but that's problematic because
    2032             :      * we can't be very sure that record-typmod-related state hasn't escaped
    2033             :      * to anywhere else in the process.
    2034             :      */
    2035             :     Assert(NextRecordTypmod == 0);
    2036             : 
    2037        1616 :     old_context = MemoryContextSwitchTo(TopMemoryContext);
    2038             : 
    2039             :     /* Attach to the two hash tables. */
    2040        1616 :     record_table = dshash_attach(CurrentSession->area,
    2041             :                                  &srtr_record_table_params,
    2042             :                                  registry->record_table_handle,
    2043        1616 :                                  CurrentSession->area);
    2044        1616 :     typmod_table = dshash_attach(CurrentSession->area,
    2045             :                                  &srtr_typmod_table_params,
    2046             :                                  registry->typmod_table_handle,
    2047             :                                  NULL);
    2048             : 
    2049        1616 :     MemoryContextSwitchTo(old_context);
    2050             : 
    2051             :     /*
    2052             :      * Set up detach hook to run at worker exit.  Currently this is the same
    2053             :      * as the leader's detach hook, but in future they might need to be
    2054             :      * different.
    2055             :      */
    2056        1616 :     on_dsm_detach(CurrentSession->segment,
    2057             :                   shared_record_typmod_registry_detach,
    2058             :                   PointerGetDatum(registry));
    2059             : 
    2060             :     /*
    2061             :      * Set up the session state that will tell assign_record_type_typmod and
    2062             :      * lookup_rowtype_tupdesc_internal about the shared registry.
    2063             :      */
    2064        1616 :     CurrentSession->shared_typmod_registry = registry;
    2065        1616 :     CurrentSession->shared_record_table = record_table;
    2066        1616 :     CurrentSession->shared_typmod_table = typmod_table;
    2067        1616 : }
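
The matching worker-side sketch, assuming the same hypothetical toc key as above: look up the registry the leader published, then attach.

/* Worker side: find the registry in the toc and attach to it (sketch). */
static void
join_typmod_registry(shm_toc *toc)
{
    SharedRecordTypmodRegistry *registry;

    registry = (SharedRecordTypmodRegistry *)
        shm_toc_lookup(toc, MY_REGISTRY_KEY, false);
    SharedRecordTypmodRegistryAttach(registry);
}
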
    2068             : 
    2069             : /*
    2070             :  * TypeCacheRelCallback
    2071             :  *      Relcache inval callback function
    2072             :  *
    2073             :  * Delete the cached tuple descriptor (if any) for the given rel's composite
    2074             :  * type, or for all composite types if relid == InvalidOid.  Also reset
    2075             :  * whatever info we have cached about the composite type's comparability.
    2076             :  *
    2077             :  * This is called when a relcache invalidation event occurs for the given
    2078             :  * relid.  We must scan the whole typcache hash since we don't know the
    2079             :  * type OID corresponding to the relid.  We could do a direct search if this
    2080             :  * were a syscache-flush callback on pg_type, but then we would need all
    2081             :  * ALTER-TABLE-like commands that could modify a rowtype to issue syscache
    2082             :  * invals against the rel's pg_type OID.  The extra SI signaling could very
    2083             :  * well cost more than we'd save, since in most usages there are not very
    2084             :  * many entries in a backend's typcache.  The risk of bugs-of-omission seems
    2085             :  * high, too.
    2086             :  *
    2087             :  * Another possibility, with only localized impact, is to maintain a second
    2088             :  * hashtable that indexes composite-type typcache entries by their typrelid.
    2089             :  * But it's still not clear it's worth the trouble.
    2090             :  */
    2091             : static void
    2092      874942 : TypeCacheRelCallback(Datum arg, Oid relid)
    2093             : {
    2094             :     HASH_SEQ_STATUS status;
    2095             :     TypeCacheEntry *typentry;
    2096             : 
    2097             :     /* TypeCacheHash must exist, else this callback wouldn't be registered */
    2098      874942 :     hash_seq_init(&status, TypeCacheHash);
    2099    10990282 :     while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
    2100             :     {
    2101     9240398 :         if (typentry->typtype == TYPTYPE_COMPOSITE)
    2102             :         {
    2103             :             /* Skip if no match, unless we're zapping all composite types */
    2104     1744684 :             if (relid != typentry->typrelid && relid != InvalidOid)
    2105     1736694 :                 continue;
    2106             : 
    2107             :             /* Delete tupdesc if we have it */
    2108        7990 :             if (typentry->tupDesc != NULL)
    2109             :             {
    2110             :                 /*
    2111             :                  * Release our refcount, and free the tupdesc if none remain.
    2112             :                  * (Can't use DecrTupleDescRefCount because this reference is
    2113             :                  * not logged in current resource owner.)
    2114             :                  */
    2115             :                 Assert(typentry->tupDesc->tdrefcount > 0);
    2116        1912 :                 if (--typentry->tupDesc->tdrefcount == 0)
    2117        1156 :                     FreeTupleDesc(typentry->tupDesc);
    2118        1912 :                 typentry->tupDesc = NULL;
    2119             : 
    2120             :                 /*
    2121             :                  * Also clear tupDesc_identifier, so that anything watching
    2122             :                  * that will realize that the tupdesc has possibly changed.
    2123             :                  * (Alternatively, we could specify that to detect possible
    2124             :                  * tupdesc change, one must check for tupDesc != NULL as well
    2125             :                  * as tupDesc_identifier being the same as what was previously
    2126             :                  * seen.  That seems error-prone.)
    2127             :                  */
    2128        1912 :                 typentry->tupDesc_identifier = 0;
    2129             :             }
    2130             : 
    2131             :             /* Reset equality/comparison/hashing validity information */
    2132        7990 :             typentry->flags = 0;
    2133             :         }
    2134     7495714 :         else if (typentry->typtype == TYPTYPE_DOMAIN)
    2135             :         {
    2136             :             /*
    2137             :              * If it's domain over composite, reset flags.  (We don't bother
    2138             :              * trying to determine whether the specific base type needs a
    2139             :              * reset.)  Note that if we haven't determined whether the base
    2140             :              * type is composite, we don't need to reset anything.
    2141             :              */
    2142     1022840 :             if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
    2143           0 :                 typentry->flags = 0;
    2144             :         }
    2145             :     }
    2146      874942 : }
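
For reference, a callback with this signature is wired up through the inval.h API. A minimal registration sketch follows; typcache.c makes the equivalent call once during its first-use initialization, earlier in the file and not shown here.

#include "postgres.h"
#include "utils/inval.h"

/* Hypothetical one-time setup: hook the relcache-inval callback (sketch). */
static void
register_typcache_rel_callback(void)
{
    CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
}
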
    2147             : 
    2148             : /*
    2149             :  * TypeCacheOpcCallback
    2150             :  *      Syscache inval callback function
    2151             :  *
    2152             :  * This is called when a syscache invalidation event occurs for any pg_opclass
    2153             :  * row.  In principle we could probably just invalidate data dependent on the
    2154             :  * particular opclass, but since updates on pg_opclass are rare in production
    2155             :  * it doesn't seem worth a lot of complication: we just mark all cached data
    2156             :  * invalid.
    2157             :  *
    2158             :  * Note that we don't bother watching for updates on pg_amop or pg_amproc.
    2159             :  * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
    2160             :  * is not allowed to be used to add/drop the primary operators and functions
    2161             :  * of an opclass, only cross-type members of a family; and the latter sorts
    2162             :  * of members are not going to get cached here.
    2163             :  */
    2164             : static void
    2165         758 : TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
    2166             : {
    2167             :     HASH_SEQ_STATUS status;
    2168             :     TypeCacheEntry *typentry;
    2169             : 
    2170             :     /* TypeCacheHash must exist, else this callback wouldn't be registered */
    2171         758 :     hash_seq_init(&status, TypeCacheHash);
    2172        5176 :     while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
    2173             :     {
    2174             :         /* Reset equality/comparison/hashing validity information */
    2175        3660 :         typentry->flags = 0;
    2176             :     }
    2177         758 : }
    2178             : 
    2179             : /*
    2180             :  * TypeCacheConstrCallback
    2181             :  *      Syscache inval callback function
    2182             :  *
    2183             :  * This is called when a syscache invalidation event occurs for any
    2184             :  * pg_constraint or pg_type row.  We flush information about domain
    2185             :  * constraints when this happens.
    2186             :  *
    2187             :  * It's slightly annoying that we can't tell whether the inval event was for a
    2188             :  * domain constraint/type record or not; there's usually more update traffic
    2189             :  * for table constraints/types than domain constraints, so we'll do a lot of
    2190             :  * useless flushes.  Still, this is better than the old no-caching-at-all
    2191             :  * approach to domain constraints.
    2192             :  */
    2193             : static void
    2194      370674 : TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
    2195             : {
    2196             :     TypeCacheEntry *typentry;
    2197             : 
    2198             :     /*
    2199             :      * Because this is called very frequently, and typically very few of the
    2200             :      * typcache entries are for domains, we don't use hash_seq_search here.
    2201             :      * Instead we thread all the domain-type entries together so that we can
    2202             :      * visit them cheaply.
    2203             :      */
    2204     1108482 :     for (typentry = firstDomainTypeEntry;
    2205             :          typentry != NULL;
    2206      367134 :          typentry = typentry->nextDomain)
    2207             :     {
    2208             :         /* Reset domain constraint validity information */
    2209      367134 :         typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
    2210             :     }
    2211      370674 : }
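
The two syscache callbacks above are registered the same way, keyed by syscache ID. A sketch of the registrations, again assuming they run once during typcache initialization:

#include "postgres.h"
#include "utils/inval.h"
#include "utils/syscache.h"

/* Hypothetical one-time setup: hook the syscache-inval callbacks (sketch). */
static void
register_typcache_syscache_callbacks(void)
{
    /* pg_opclass changes invalidate cached opclass-dependent information */
    CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);

    /* pg_constraint and pg_type changes invalidate domain-constraint data */
    CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);
    CacheRegisterSyscacheCallback(TYPEOID, TypeCacheConstrCallback, (Datum) 0);
}
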
    2212             : 
    2213             : 
    2214             : /*
    2215             :  * Check if given OID is part of the subset that's sortable by comparisons
    2216             :  */
    2217             : static inline bool
    2218      300080 : enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
    2219             : {
    2220             :     Oid         offset;
    2221             : 
    2222      300080 :     if (arg < enumdata->bitmap_base)
    2223           0 :         return false;
    2224      300080 :     offset = arg - enumdata->bitmap_base;
    2225      300080 :     if (offset > (Oid) INT_MAX)
    2226           0 :         return false;
    2227      300080 :     return bms_is_member((int) offset, enumdata->sorted_values);
    2228             : }
    2229             : 
    2230             : 
    2231             : /*
    2232             :  * compare_values_of_enum
    2233             :  *      Compare two members of an enum type.
    2234             :  *      Return <0, 0, or >0 according as arg1 <, =, or > arg2.
    2235             :  *
    2236             :  * Note: currently, the enumData cache is refreshed only if we are asked
    2237             :  * to compare an enum value that is not already in the cache.  This is okay
    2238             :  * because there is no support for re-ordering existing values, so comparisons
    2239             :  * of previously cached values will return the right answer even if other
    2240             :  * values have been added since we last loaded the cache.
    2241             :  *
    2242             :  * Note: the enum logic has a special-case rule about even-numbered versus
    2243             :  * odd-numbered OIDs, but we take no account of that rule here; this
    2244             :  * routine shouldn't even get called when that rule applies.
    2245             :  */
    2246             : int
    2247      150052 : compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
    2248             : {
    2249             :     TypeCacheEnumData *enumdata;
    2250             :     EnumItem   *item1;
    2251             :     EnumItem   *item2;
    2252             : 
    2253             :     /*
    2254             :      * Equal OIDs are certainly equal --- this case was probably handled by
    2255             :      * our caller, but we may as well check.
    2256             :      */
    2257      150052 :     if (arg1 == arg2)
    2258           0 :         return 0;
    2259             : 
    2260             :     /* Load up the cache if first time through */
    2261      150052 :     if (tcache->enumData == NULL)
    2262           6 :         load_enum_cache_data(tcache);
    2263      150052 :     enumdata = tcache->enumData;
    2264             : 
    2265             :     /*
    2266             :      * If both OIDs are known-sorted, we can just compare them directly.
    2267             :      */
    2268      300080 :     if (enum_known_sorted(enumdata, arg1) &&
    2269      150028 :         enum_known_sorted(enumdata, arg2))
    2270             :     {
    2271           0 :         if (arg1 < arg2)
    2272           0 :             return -1;
    2273             :         else
    2274           0 :             return 1;
    2275             :     }
    2276             : 
    2277             :     /*
    2278             :      * Slow path: we have to identify their actual sort-order positions.
    2279             :      */
    2280      150052 :     item1 = find_enumitem(enumdata, arg1);
    2281      150052 :     item2 = find_enumitem(enumdata, arg2);
    2282             : 
    2283      150052 :     if (item1 == NULL || item2 == NULL)
    2284             :     {
    2285             :         /*
    2286             :          * We couldn't find one or both values.  That means the enum has
    2287             :          * changed under us, so re-initialize the cache and try again. We
    2288             :          * don't bother retrying the known-sorted case in this path.
    2289             :          */
    2290           0 :         load_enum_cache_data(tcache);
    2291           0 :         enumdata = tcache->enumData;
    2292             : 
    2293           0 :         item1 = find_enumitem(enumdata, arg1);
    2294           0 :         item2 = find_enumitem(enumdata, arg2);
    2295             : 
    2296             :         /*
    2297             :          * If we still can't find the values, complain: we must have corrupt
    2298             :          * data.
    2299             :          */
    2300           0 :         if (item1 == NULL)
    2301           0 :             elog(ERROR, "enum value %u not found in cache for enum %s",
    2302             :                  arg1, format_type_be(tcache->type_id));
    2303           0 :         if (item2 == NULL)
    2304           0 :             elog(ERROR, "enum value %u not found in cache for enum %s",
    2305             :                  arg2, format_type_be(tcache->type_id));
    2306             :     }
    2307             : 
    2308      150052 :     if (item1->sort_order < item2->sort_order)
    2309       50016 :         return -1;
    2310      100036 :     else if (item1->sort_order > item2->sort_order)
    2311      100036 :         return 1;
    2312             :     else
    2313           0 :         return 0;
    2314             : }
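
A caller comparing two values of an enum type goes through lookup_type_cache() first. A minimal sketch, where my_enum_cmp and its arguments are hypothetical:

#include "postgres.h"
#include "utils/typcache.h"

/* Return <0, 0, or >0 ordering two values of the given enum type (sketch). */
static int
my_enum_cmp(Oid enum_type_oid, Oid val1, Oid val2)
{
    /* No special flags needed; the enum data is loaded lazily on first use. */
    TypeCacheEntry *tcache = lookup_type_cache(enum_type_oid, 0);

    return compare_values_of_enum(tcache, val1, val2);
}
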
    2315             : 
    2316             : /*
    2317             :  * Load (or re-load) the enumData member of the typcache entry.
    2318             :  */
    2319             : static void
    2320           6 : load_enum_cache_data(TypeCacheEntry *tcache)
    2321             : {
    2322             :     TypeCacheEnumData *enumdata;
    2323             :     Relation    enum_rel;
    2324             :     SysScanDesc enum_scan;
    2325             :     HeapTuple   enum_tuple;
    2326             :     ScanKeyData skey;
    2327             :     EnumItem   *items;
    2328             :     int         numitems;
    2329             :     int         maxitems;
    2330             :     Oid         bitmap_base;
    2331             :     Bitmapset  *bitmap;
    2332             :     MemoryContext oldcxt;
    2333             :     int         bm_size,
    2334             :                 start_pos;
    2335             : 
    2336             :     /* Check that this is actually an enum */
    2337           6 :     if (tcache->typtype != TYPTYPE_ENUM)
    2338           0 :         ereport(ERROR,
    2339             :                 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
    2340             :                  errmsg("%s is not an enum",
    2341             :                         format_type_be(tcache->type_id))));
    2342             : 
    2343             :     /*
    2344             :      * Read all the information for members of the enum type.  We collect the
    2345             :      * info in working memory in the caller's context, and then transfer it to
    2346             :      * permanent memory in CacheMemoryContext.  This minimizes the risk of
    2347             :      * leaking memory from CacheMemoryContext in the event of an error partway
    2348             :      * through.
    2349             :      */
    2350           6 :     maxitems = 64;
    2351           6 :     items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
    2352           6 :     numitems = 0;
    2353             : 
    2354             :     /* Scan pg_enum for the members of the target enum type. */
    2355           6 :     ScanKeyInit(&skey,
    2356             :                 Anum_pg_enum_enumtypid,
    2357             :                 BTEqualStrategyNumber, F_OIDEQ,
    2358           6 :                 ObjectIdGetDatum(tcache->type_id));
    2359             : 
    2360           6 :     enum_rel = table_open(EnumRelationId, AccessShareLock);
    2361           6 :     enum_scan = systable_beginscan(enum_rel,
    2362             :                                    EnumTypIdLabelIndexId,
    2363             :                                    true, NULL,
    2364             :                                    1, &skey);
    2365             : 
    2366          52 :     while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
    2367             :     {
    2368          40 :         Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);
    2369             : 
    2370          40 :         if (numitems >= maxitems)
    2371             :         {
    2372           0 :             maxitems *= 2;
    2373           0 :             items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
    2374             :         }
    2375          40 :         items[numitems].enum_oid = en->oid;
    2376          40 :         items[numitems].sort_order = en->enumsortorder;
    2377          40 :         numitems++;
    2378             :     }
    2379             : 
    2380           6 :     systable_endscan(enum_scan);
    2381           6 :     table_close(enum_rel, AccessShareLock);
    2382             : 
    2383             :     /* Sort the items into OID order */
    2384           6 :     qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
    2385             : 
    2386             :     /*
    2387             :      * Here, we create a bitmap listing a subset of the enum's OIDs that are
    2388             :      * known to be in order and can thus be compared with just OID comparison.
    2389             :      *
    2390             :      * The point of this is that the enum's initial OIDs were certainly in
    2391             :      * order, so there is some subset that can be compared via OID comparison;
    2392             :      * and we'd rather not do binary searches unnecessarily.
    2393             :      *
    2394             :      * This is somewhat heuristic, and might identify a subset of OIDs that
    2395             :      * isn't exactly what the type started with.  That's okay as long as the
    2396             :      * subset is correctly sorted.
    2397             :      */
    2398           6 :     bitmap_base = InvalidOid;
    2399           6 :     bitmap = NULL;
    2400           6 :     bm_size = 1;                /* only save sets of at least 2 OIDs */
    2401             : 
    2402          14 :     for (start_pos = 0; start_pos < numitems - 1; start_pos++)
    2403             :     {
    2404             :         /*
    2405             :          * Identify longest sorted subsequence starting at start_pos
    2406             :          */
    2407          14 :         Bitmapset  *this_bitmap = bms_make_singleton(0);
    2408          14 :         int         this_bm_size = 1;
    2409          14 :         Oid         start_oid = items[start_pos].enum_oid;
    2410          14 :         float4      prev_order = items[start_pos].sort_order;
    2411             :         int         i;
    2412             : 
    2413          92 :         for (i = start_pos + 1; i < numitems; i++)
    2414             :         {
    2415             :             Oid         offset;
    2416             : 
    2417          78 :             offset = items[i].enum_oid - start_oid;
    2418             :             /* quit if bitmap would be too large; cutoff is arbitrary */
    2419          78 :             if (offset >= 8192)
    2420           0 :                 break;
    2421             :             /* include the item if it's in-order */
    2422          78 :             if (items[i].sort_order > prev_order)
    2423             :             {
    2424          40 :                 prev_order = items[i].sort_order;
    2425          40 :                 this_bitmap = bms_add_member(this_bitmap, (int) offset);
    2426          40 :                 this_bm_size++;
    2427             :             }
    2428             :         }
    2429             : 
    2430             :         /* Remember it if larger than previous best */
    2431          14 :         if (this_bm_size > bm_size)
    2432             :         {
    2433           6 :             bms_free(bitmap);
    2434           6 :             bitmap_base = start_oid;
    2435           6 :             bitmap = this_bitmap;
    2436           6 :             bm_size = this_bm_size;
    2437             :         }
    2438             :         else
    2439           8 :             bms_free(this_bitmap);
    2440             : 
    2441             :         /*
    2442             :          * Done if it's not possible to find a longer sequence in the rest of
    2443             :          * the list.  In typical cases this will happen on the first
    2444             :          * iteration, which is why we create the bitmaps on the fly instead of
    2445             :          * doing a second pass over the list.
    2446             :          */
    2447          14 :         if (bm_size >= (numitems - start_pos - 1))
    2448           6 :             break;
    2449             :     }
    2450             : 
    2451             :     /* OK, copy the data into CacheMemoryContext */
    2452           6 :     oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
    2453           6 :     enumdata = (TypeCacheEnumData *)
    2454           6 :         palloc(offsetof(TypeCacheEnumData, enum_values) +
    2455           6 :                numitems * sizeof(EnumItem));
    2456           6 :     enumdata->bitmap_base = bitmap_base;
    2457           6 :     enumdata->sorted_values = bms_copy(bitmap);
    2458           6 :     enumdata->num_values = numitems;
    2459           6 :     memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
    2460           6 :     MemoryContextSwitchTo(oldcxt);
    2461             : 
    2462           6 :     pfree(items);
    2463           6 :     bms_free(bitmap);
    2464             : 
    2465             :     /* And link the finished cache struct into the typcache */
    2466           6 :     if (tcache->enumData != NULL)
    2467           0 :         pfree(tcache->enumData);
    2468           6 :     tcache->enumData = enumdata;
    2469           6 : }
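
The bitmap-building loop above is essentially: for each starting OID, greedily keep the values whose sort order still increases, and remember the longest such run. Below is a standalone toy illustration of just that selection, with invented data and no PostgreSQL dependencies; the real code additionally records the chosen offsets in a Bitmapset.

#include <stdio.h>

typedef struct { unsigned oid; double sort_order; } DemoItem;

int
main(void)
{
    /* OIDs ascending; OID 1003 was ADDed later with a BEFORE clause, so its
     * sort order (1.5) is out of line with its OID position. */
    DemoItem    items[] = {
        {1001, 1.0}, {1002, 2.0}, {1003, 1.5}, {1004, 3.0}, {1005, 4.0}
    };
    int         numitems = 5;
    int         best_size = 1;
    int         best_start = 0;

    for (int start = 0; start < numitems - 1; start++)
    {
        int         size = 1;
        double      prev = items[start].sort_order;

        for (int i = start + 1; i < numitems; i++)
        {
            if (items[i].sort_order > prev)
            {
                prev = items[i].sort_order;
                size++;
            }
        }

        if (size > best_size)
        {
            best_size = size;
            best_start = start;
        }
        /* no longer run can start further to the right */
        if (best_size >= numitems - start - 1)
            break;
    }

    /* prints: best run starts at OID 1001 and covers 4 of 5 values */
    printf("best run starts at OID %u and covers %d of %d values\n",
           items[best_start].oid, best_size, numitems);
    return 0;
}
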
    2470             : 
    2471             : /*
    2472             :  * Locate the EnumItem with the given OID, if present
    2473             :  */
    2474             : static EnumItem *
    2475      300104 : find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
    2476             : {
    2477             :     EnumItem    srch;
    2478             : 
    2479             :     /* On some versions of Solaris, bsearch of zero items dumps core */
    2480      300104 :     if (enumdata->num_values <= 0)
    2481           0 :         return NULL;
    2482             : 
    2483      300104 :     srch.enum_oid = arg;
    2484      300104 :     return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
    2485             :                    sizeof(EnumItem), enum_oid_cmp);
    2486             : }
    2487             : 
    2488             : /*
    2489             :  * qsort comparison function for OID-ordered EnumItems
    2490             :  */
    2491             : static int
    2492      600360 : enum_oid_cmp(const void *left, const void *right)
    2493             : {
    2494      600360 :     const EnumItem *l = (const EnumItem *) left;
    2495      600360 :     const EnumItem *r = (const EnumItem *) right;
    2496             : 
    2497      600360 :     if (l->enum_oid < r->enum_oid)
    2498      150132 :         return -1;
    2499      450228 :     else if (l->enum_oid > r->enum_oid)
    2500      150124 :         return 1;
    2501             :     else
    2502      300104 :         return 0;
    2503             : }
    2504             : 
    2505             : /*
    2506             :  * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
    2507             :  * to the given value and return a dsa_pointer.
    2508             :  */
    2509             : static dsa_pointer
    2510          32 : share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
    2511             : {
    2512             :     dsa_pointer shared_dp;
    2513             :     TupleDesc   shared;
    2514             : 
    2515          32 :     shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
    2516          32 :     shared = (TupleDesc) dsa_get_address(area, shared_dp);
    2517          32 :     TupleDescCopy(shared, tupdesc);
    2518          32 :     shared->tdtypmod = typmod;
    2519             : 
    2520          32 :     return shared_dp;
    2521             : }
    2522             : 
    2523             : /*
    2524             :  * If we are attached to a SharedRecordTypmodRegistry, use it to find or
    2525             :  * create a shared TupleDesc that matches 'tupdesc'.  Otherwise return NULL.
    2526             :  * Tuple descriptors returned by this function are not reference counted, and
     2527             :  * will exist at least as long as the current backend remains attached to the
    2528             :  * current session.
    2529             :  */
    2530             : static TupleDesc
    2531       11316 : find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
    2532             : {
    2533             :     TupleDesc   result;
    2534             :     SharedRecordTableKey key;
    2535             :     SharedRecordTableEntry *record_table_entry;
    2536             :     SharedTypmodTableEntry *typmod_table_entry;
    2537             :     dsa_pointer shared_dp;
    2538             :     bool        found;
    2539             :     uint32      typmod;
    2540             : 
    2541             :     /* If not even attached, nothing to do. */
    2542       11316 :     if (CurrentSession->shared_typmod_registry == NULL)
    2543       11272 :         return NULL;
    2544             : 
    2545             :     /* Try to find a matching tuple descriptor in the record table. */
    2546          44 :     key.shared = false;
    2547          44 :     key.u.local_tupdesc = tupdesc;
    2548          44 :     record_table_entry = (SharedRecordTableEntry *)
    2549          44 :         dshash_find(CurrentSession->shared_record_table, &key, false);
    2550          44 :     if (record_table_entry)
    2551             :     {
    2552             :         Assert(record_table_entry->key.shared);
    2553          12 :         dshash_release_lock(CurrentSession->shared_record_table,
    2554             :                             record_table_entry);
    2555          12 :         result = (TupleDesc)
    2556          12 :             dsa_get_address(CurrentSession->area,
    2557             :                             record_table_entry->key.u.shared_tupdesc);
    2558             :         Assert(result->tdrefcount == -1);
    2559             : 
    2560          12 :         return result;
    2561             :     }
    2562             : 
    2563             :     /* Allocate a new typmod number.  This will be wasted if we error out. */
    2564          32 :     typmod = (int)
    2565          32 :         pg_atomic_fetch_add_u32(&CurrentSession->shared_typmod_registry->next_typmod,
    2566             :                                 1);
    2567             : 
    2568             :     /* Copy the TupleDesc into shared memory. */
    2569          32 :     shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
    2570             : 
    2571             :     /*
    2572             :      * Create an entry in the typmod table so that others will understand this
    2573             :      * typmod number.
    2574             :      */
    2575          32 :     PG_TRY();
    2576             :     {
    2577          32 :         typmod_table_entry = (SharedTypmodTableEntry *)
    2578          32 :             dshash_find_or_insert(CurrentSession->shared_typmod_table,
    2579             :                                   &typmod, &found);
    2580          32 :         if (found)
    2581           0 :             elog(ERROR, "cannot create duplicate shared record typmod");
    2582             :     }
    2583           0 :     PG_CATCH();
    2584             :     {
    2585           0 :         dsa_free(CurrentSession->area, shared_dp);
    2586           0 :         PG_RE_THROW();
    2587             :     }
    2588          32 :     PG_END_TRY();
    2589          32 :     typmod_table_entry->typmod = typmod;
    2590          32 :     typmod_table_entry->shared_tupdesc = shared_dp;
    2591          32 :     dshash_release_lock(CurrentSession->shared_typmod_table,
    2592             :                         typmod_table_entry);
    2593             : 
    2594             :     /*
    2595             :      * Finally create an entry in the record table so others with matching
    2596             :      * tuple descriptors can reuse the typmod.
    2597             :      */
    2598          32 :     record_table_entry = (SharedRecordTableEntry *)
    2599          32 :         dshash_find_or_insert(CurrentSession->shared_record_table, &key,
    2600             :                               &found);
    2601          32 :     if (found)
    2602             :     {
    2603             :         /*
    2604             :          * Someone concurrently inserted a matching tuple descriptor since the
    2605             :          * first time we checked.  Use that one instead.
    2606             :          */
    2607           0 :         dshash_release_lock(CurrentSession->shared_record_table,
    2608             :                             record_table_entry);
    2609             : 
    2610             :         /* Might as well free up the space used by the one we created. */
    2611           0 :         found = dshash_delete_key(CurrentSession->shared_typmod_table,
    2612             :                                   &typmod);
     2613           0 :                             record_table_entry->key.u.shared_tupdesc);
    2614           0 :         dsa_free(CurrentSession->area, shared_dp);
    2615             : 
    2616             :         /* Return the one we found. */
    2617             :         Assert(record_table_entry->key.shared);
    2618           0 :         result = (TupleDesc)
    2619           0 :             dsa_get_address(CurrentSession->area,
    2620           0 :                             record_table_entry->key.shared);
    2621             :         Assert(result->tdrefcount == -1);
    2622             : 
    2623           0 :         return result;
    2624             :     }
    2625             : 
    2626             :     /* Store it and return it. */
    2627          32 :     record_table_entry->key.shared = true;
    2628          32 :     record_table_entry->key.u.shared_tupdesc = shared_dp;
    2629          32 :     dshash_release_lock(CurrentSession->shared_record_table,
    2630             :                         record_table_entry);
    2631          32 :     result = (TupleDesc)
    2632          32 :         dsa_get_address(CurrentSession->area, shared_dp);
    2633             :     Assert(result->tdrefcount == -1);
    2634             : 
    2635          32 :     return result;
    2636             : }
    2637             : 
    2638             : /*
    2639             :  * On-DSM-detach hook to forget about the current shared record typmod
    2640             :  * infrastructure.  This is currently used by both leader and workers.
    2641             :  */
    2642             : static void
    2643        1660 : shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
    2644             : {
    2645             :     /* Be cautious here: maybe we didn't finish initializing. */
    2646        1660 :     if (CurrentSession->shared_record_table != NULL)
    2647             :     {
    2648        1660 :         dshash_detach(CurrentSession->shared_record_table);
    2649        1660 :         CurrentSession->shared_record_table = NULL;
    2650             :     }
    2651        1660 :     if (CurrentSession->shared_typmod_table != NULL)
    2652             :     {
    2653        1660 :         dshash_detach(CurrentSession->shared_typmod_table);
    2654        1660 :         CurrentSession->shared_typmod_table = NULL;
    2655             :     }
    2656        1660 :     CurrentSession->shared_typmod_registry = NULL;
    2657        1660 : }

Generated by: LCOV version 1.13