LCOV - code coverage report
Current view: top level - src/backend/utils/cache - typcache.c
Test: PostgreSQL 13beta1           Lines:       741 hit / 850 total   (87.2 %)
Date: 2020-06-01 09:07:10          Functions:    47 hit /  49 total   (95.9 %)

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * typcache.c
       4             :  *    POSTGRES type cache code
       5             :  *
       6             :  * The type cache exists to speed lookup of certain information about data
       7             :  * types that is not directly available from a type's pg_type row.  For
       8             :  * example, we use a type's default btree opclass, or the default hash
       9             :  * opclass if no btree opclass exists, to determine which operators should
      10             :  * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
      11             :  *
      12             :  * Several seemingly-odd choices have been made to support use of the type
      13             :  * cache by generic array and record handling routines, such as array_eq(),
      14             :  * record_cmp(), and hash_array().  Because those routines are used as index
      15             :  * support operations, they cannot leak memory.  To allow them to execute
      16             :  * efficiently, all information that they would like to re-use across calls
      17             :  * is kept in the type cache.
      18             :  *
      19             :  * Once created, a type cache entry lives as long as the backend does, so
      20             :  * there is no need for a call to release a cache entry.  If the type is
      21             :  * dropped, the cache entry simply becomes wasted storage.  This is not
      22             :  * expected to happen often, and assuming that typcache entries are good
      23             :  * permanently allows caching pointers to them in long-lived places.
      24             :  *
      25             :  * We have some provisions for updating cache entries if the stored data
      26             :  * becomes obsolete.  Core data extracted from the pg_type row is updated
      27             :  * when we detect updates to pg_type.  Information dependent on opclasses is
      28             :  * cleared if we detect updates to pg_opclass.  We also support clearing the
      29             :  * tuple descriptor and operator/function parts of a rowtype's cache entry,
      30             :  * since those may need to change as a consequence of ALTER TABLE.  Domain
      31             :  * constraint changes are also tracked properly.
      32             :  *
      33             :  *
      34             :  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
      35             :  * Portions Copyright (c) 1994, Regents of the University of California
      36             :  *
      37             :  * IDENTIFICATION
      38             :  *    src/backend/utils/cache/typcache.c
      39             :  *
      40             :  *-------------------------------------------------------------------------
      41             :  */
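To make the header comment concrete, here is a hedged usage sketch -- a hypothetical caller, not part of typcache.c -- showing the pattern by which the parser/executor asks the cache for the operators behind GROUP BY and ORDER BY. The function name and error wording are invented; lookup_type_cache and the TYPECACHE_* flag bits come from utils/typcache.h.

    /*
     * Illustrative sketch only (not part of this file): fetch the equality
     * and ordering operators for a type in a single cache lookup, then
     * verify that the requested fields were actually found.
     */
    static void
    example_get_sort_group_operators(Oid typid, Oid *eq_opr, Oid *lt_opr)
    {
        TypeCacheEntry *typentry;

        typentry = lookup_type_cache(typid, TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR);

        /* The result is never NULL, but requested fields may be InvalidOid. */
        if (!OidIsValid(typentry->eq_opr) || !OidIsValid(typentry->lt_opr))
            ereport(ERROR,
                    (errcode(ERRCODE_UNDEFINED_FUNCTION),
                     errmsg("could not identify sort/group operators for type %s",
                            format_type_be(typid))));

        *eq_opr = typentry->eq_opr;
        *lt_opr = typentry->lt_opr;
    }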
      42             : #include "postgres.h"
      43             : 
      44             : #include <limits.h>
      45             : 
      46             : #include "access/hash.h"
      47             : #include "access/htup_details.h"
      48             : #include "access/nbtree.h"
      49             : #include "access/parallel.h"
      50             : #include "access/relation.h"
      51             : #include "access/session.h"
      52             : #include "access/table.h"
      53             : #include "catalog/indexing.h"
      54             : #include "catalog/pg_am.h"
      55             : #include "catalog/pg_constraint.h"
      56             : #include "catalog/pg_enum.h"
      57             : #include "catalog/pg_operator.h"
      58             : #include "catalog/pg_range.h"
      59             : #include "catalog/pg_type.h"
      60             : #include "commands/defrem.h"
      61             : #include "executor/executor.h"
      62             : #include "lib/dshash.h"
      63             : #include "optimizer/optimizer.h"
      64             : #include "storage/lwlock.h"
      65             : #include "utils/builtins.h"
      66             : #include "utils/catcache.h"
      67             : #include "utils/fmgroids.h"
      68             : #include "utils/inval.h"
      69             : #include "utils/lsyscache.h"
      70             : #include "utils/memutils.h"
      71             : #include "utils/rel.h"
      72             : #include "utils/snapmgr.h"
      73             : #include "utils/syscache.h"
      74             : #include "utils/typcache.h"
      75             : 
      76             : 
      77             : /* The main type cache hashtable searched by lookup_type_cache */
      78             : static HTAB *TypeCacheHash = NULL;
      79             : 
      80             : /* List of type cache entries for domain types */
      81             : static TypeCacheEntry *firstDomainTypeEntry = NULL;
      82             : 
      83             : /* Private flag bits in the TypeCacheEntry.flags field */
      84             : #define TCFLAGS_HAVE_PG_TYPE_DATA           0x000001
      85             : #define TCFLAGS_CHECKED_BTREE_OPCLASS       0x000002
      86             : #define TCFLAGS_CHECKED_HASH_OPCLASS        0x000004
      87             : #define TCFLAGS_CHECKED_EQ_OPR              0x000008
      88             : #define TCFLAGS_CHECKED_LT_OPR              0x000010
      89             : #define TCFLAGS_CHECKED_GT_OPR              0x000020
      90             : #define TCFLAGS_CHECKED_CMP_PROC            0x000040
      91             : #define TCFLAGS_CHECKED_HASH_PROC           0x000080
      92             : #define TCFLAGS_CHECKED_HASH_EXTENDED_PROC  0x000100
      93             : #define TCFLAGS_CHECKED_ELEM_PROPERTIES     0x000200
      94             : #define TCFLAGS_HAVE_ELEM_EQUALITY          0x000400
      95             : #define TCFLAGS_HAVE_ELEM_COMPARE           0x000800
      96             : #define TCFLAGS_HAVE_ELEM_HASHING           0x001000
      97             : #define TCFLAGS_HAVE_ELEM_EXTENDED_HASHING  0x002000
      98             : #define TCFLAGS_CHECKED_FIELD_PROPERTIES    0x004000
      99             : #define TCFLAGS_HAVE_FIELD_EQUALITY         0x008000
     100             : #define TCFLAGS_HAVE_FIELD_COMPARE          0x010000
     101             : #define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS  0x020000
     102             : #define TCFLAGS_DOMAIN_BASE_IS_COMPOSITE    0x040000
     103             : 
     104             : /* The flags associated with equality/comparison/hashing are all but these: */
     105             : #define TCFLAGS_OPERATOR_FLAGS \
     106             :     (~(TCFLAGS_HAVE_PG_TYPE_DATA | \
     107             :        TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS | \
     108             :        TCFLAGS_DOMAIN_BASE_IS_COMPOSITE))
     109             : 
     110             : /*
     111             :  * Data stored about a domain type's constraints.  Note that we do not create
     112             :  * this struct for the common case of a constraint-less domain; we just set
     113             :  * domainData to NULL to indicate that.
     114             :  *
     115             :  * Within a DomainConstraintCache, we store expression plan trees, but the
     116             :  * check_exprstate fields of the DomainConstraintState nodes are just NULL.
     117             :  * When needed, expression evaluation nodes are built by flat-copying the
     118             :  * DomainConstraintState nodes and applying ExecInitExpr to check_expr.
     119             :  * Such a node tree is not part of the DomainConstraintCache, but is
     120             :  * considered to belong to a DomainConstraintRef.
     121             :  */
     122             : struct DomainConstraintCache
     123             : {
     124             :     List       *constraints;    /* list of DomainConstraintState nodes */
     125             :     MemoryContext dccContext;   /* memory context holding all associated data */
     126             :     long        dccRefCount;    /* number of references to this struct */
     127             : };
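As a hedged illustration of the comment above -- not the file's actual code -- the following sketch shows how a DomainConstraintRef consumer would turn the cached, plan-only DomainConstraintState list into an executable one by flat-copying each node and compiling check_expr with ExecInitExpr. The helper name is hypothetical; the real work is done by prep_domain_constraints(), declared further down, and its details may differ.

    /*
     * Illustrative sketch only: build executable constraint state from the
     * cached constraint list, as described in the comment above.
     */
    static List *
    example_executable_constraints(List *cached_constraints)
    {
        List       *result = NIL;
        ListCell   *lc;

        foreach(lc, cached_constraints)
        {
            DomainConstraintState *r = (DomainConstraintState *) lfirst(lc);
            DomainConstraintState *newr = makeNode(DomainConstraintState);

            /* flat-copy the cached node ... */
            newr->constrainttype = r->constrainttype;
            newr->name = r->name;
            newr->check_expr = r->check_expr;
            /* ... then build its expression evaluation state */
            newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);

            result = lappend(result, newr);
        }
        return result;
    }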
     128             : 
     129             : /* Private information to support comparisons of enum values */
     130             : typedef struct
     131             : {
     132             :     Oid         enum_oid;       /* OID of one enum value */
     133             :     float4      sort_order;     /* its sort position */
     134             : } EnumItem;
     135             : 
     136             : typedef struct TypeCacheEnumData
     137             : {
     138             :     Oid         bitmap_base;    /* OID corresponding to bit 0 of bitmapset */
     139             :     Bitmapset  *sorted_values;  /* Set of OIDs known to be in order */
     140             :     int         num_values;     /* total number of values in enum */
     141             :     EnumItem    enum_values[FLEXIBLE_ARRAY_MEMBER];
     142             : } TypeCacheEnumData;
     143             : 
     144             : /*
     145             :  * We use a separate table for storing the definitions of non-anonymous
     146             :  * record types.  Once defined, a record type will be remembered for the
     147             :  * life of the backend.  Subsequent uses of the "same" record type (where
     148             :  * sameness means equalTupleDescs) will refer to the existing table entry.
     149             :  *
     150             :  * Stored record types are remembered in a linear array of TupleDescs,
     151             :  * which can be indexed quickly with the assigned typmod.  There is also
     152             :  * a hash table to speed searches for matching TupleDescs.
     153             :  */
     154             : 
     155             : typedef struct RecordCacheEntry
     156             : {
     157             :     TupleDesc   tupdesc;
     158             : } RecordCacheEntry;
     159             : 
     160             : /*
     161             :  * To deal with non-anonymous record types that are exchanged by backends
     162             :  * involved in a parallel query, we also need a shared version of the above.
     163             :  */
     164             : struct SharedRecordTypmodRegistry
     165             : {
     166             :     /* A hash table for finding a matching TupleDesc. */
     167             :     dshash_table_handle record_table_handle;
     168             :     /* A hash table for finding a TupleDesc by typmod. */
     169             :     dshash_table_handle typmod_table_handle;
     170             :     /* A source of new record typmod numbers. */
     171             :     pg_atomic_uint32 next_typmod;
     172             : };
     173             : 
     174             : /*
     175             :  * When using shared tuple descriptors as hash table keys we need a way to be
     176             :  * able to search for an equal shared TupleDesc using a backend-local
     177             :  * TupleDesc.  So we use this type which can hold either, and hash and compare
     178             :  * functions that know how to handle both.
     179             :  */
     180             : typedef struct SharedRecordTableKey
     181             : {
     182             :     union
     183             :     {
     184             :         TupleDesc   local_tupdesc;
     185             :         dsa_pointer shared_tupdesc;
     186             :     }           u;
     187             :     bool        shared;
     188             : } SharedRecordTableKey;
     189             : 
     190             : /*
     191             :  * The shared version of RecordCacheEntry.  This lets us look up a typmod
     192             :  * using a TupleDesc which may be in local or shared memory.
     193             :  */
     194             : typedef struct SharedRecordTableEntry
     195             : {
     196             :     SharedRecordTableKey key;
     197             : } SharedRecordTableEntry;
     198             : 
     199             : /*
     200             :  * An entry in SharedRecordTypmodRegistry's typmod table.  This lets us look
     201             :  * up a TupleDesc in shared memory using a typmod.
     202             :  */
     203             : typedef struct SharedTypmodTableEntry
     204             : {
     205             :     uint32      typmod;
     206             :     dsa_pointer shared_tupdesc;
     207             : } SharedTypmodTableEntry;
     208             : 
     209             : /*
     210             :  * A comparator function for SharedRecordTableKey.
     211             :  */
     212             : static int
     213          24 : shared_record_table_compare(const void *a, const void *b, size_t size,
     214             :                             void *arg)
     215             : {
     216          24 :     dsa_area   *area = (dsa_area *) arg;
     217          24 :     SharedRecordTableKey *k1 = (SharedRecordTableKey *) a;
     218          24 :     SharedRecordTableKey *k2 = (SharedRecordTableKey *) b;
     219             :     TupleDesc   t1;
     220             :     TupleDesc   t2;
     221             : 
     222          24 :     if (k1->shared)
     223           0 :         t1 = (TupleDesc) dsa_get_address(area, k1->u.shared_tupdesc);
     224             :     else
     225          24 :         t1 = k1->u.local_tupdesc;
     226             : 
     227          24 :     if (k2->shared)
     228          24 :         t2 = (TupleDesc) dsa_get_address(area, k2->u.shared_tupdesc);
     229             :     else
     230           0 :         t2 = k2->u.local_tupdesc;
     231             : 
     232          24 :     return equalTupleDescs(t1, t2) ? 0 : 1;
     233             : }
     234             : 
     235             : /*
     236             :  * A hash function for SharedRecordTableKey.
     237             :  */
     238             : static uint32
     239          80 : shared_record_table_hash(const void *a, size_t size, void *arg)
     240             : {
     241          80 :     dsa_area   *area = (dsa_area *) arg;
     242          80 :     SharedRecordTableKey *k = (SharedRecordTableKey *) a;
     243             :     TupleDesc   t;
     244             : 
     245          80 :     if (k->shared)
     246           0 :         t = (TupleDesc) dsa_get_address(area, k->u.shared_tupdesc);
     247             :     else
     248          80 :         t = k->u.local_tupdesc;
     249             : 
     250          80 :     return hashTupleDesc(t);
     251             : }
     252             : 
     253             : /* Parameters for SharedRecordTypmodRegistry's TupleDesc table. */
     254             : static const dshash_parameters srtr_record_table_params = {
     255             :     sizeof(SharedRecordTableKey),   /* unused */
     256             :     sizeof(SharedRecordTableEntry),
     257             :     shared_record_table_compare,
     258             :     shared_record_table_hash,
     259             :     LWTRANCHE_PER_SESSION_RECORD_TYPE
     260             : };
     261             : 
     262             : /* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
     263             : static const dshash_parameters srtr_typmod_table_params = {
     264             :     sizeof(uint32),
     265             :     sizeof(SharedTypmodTableEntry),
     266             :     dshash_memcmp,
     267             :     dshash_memhash,
     268             :     LWTRANCHE_PER_SESSION_RECORD_TYPMOD
     269             : };
     270             : 
     271             : /* hashtable for recognizing registered record types */
     272             : static HTAB *RecordCacheHash = NULL;
     273             : 
     274             : /* arrays of info about registered record types, indexed by assigned typmod */
     275             : static TupleDesc *RecordCacheArray = NULL;
     276             : static uint64 *RecordIdentifierArray = NULL;
     277             : static int32 RecordCacheArrayLen = 0;   /* allocated length of above arrays */
     278             : static int32 NextRecordTypmod = 0;  /* number of entries used */
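A hedged sketch of the fast path implied by the comments above (hypothetical helper, not the file's real lookup code): once a record type has been assigned a typmod, its TupleDesc is fetched by direct array indexing; the hash table is only needed to find or assign a typmod for a given TupleDesc.

    /*
     * Illustrative sketch only: the "indexed quickly with the assigned
     * typmod" fast path.  The file's real lookup code also consults the
     * shared registry when running in a parallel worker.
     */
    static TupleDesc
    example_lookup_record_tupdesc(int32 typmod)
    {
        if (typmod >= 0 && typmod < RecordCacheArrayLen &&
            RecordCacheArray[typmod] != NULL)
            return RecordCacheArray[typmod];

        return NULL;            /* typmod not registered in this backend */
    }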
     279             : 
     280             : /*
     281             :  * Process-wide counter for generating unique tupledesc identifiers.
     282             :  * Zero and one (INVALID_TUPLEDESC_IDENTIFIER) aren't allowed to be chosen
     283             :  * as identifiers, so we start the counter at INVALID_TUPLEDESC_IDENTIFIER.
     284             :  */
     285             : static uint64 tupledesc_id_counter = INVALID_TUPLEDESC_IDENTIFIER;
     286             : 
     287             : static void load_typcache_tupdesc(TypeCacheEntry *typentry);
     288             : static void load_rangetype_info(TypeCacheEntry *typentry);
     289             : static void load_domaintype_info(TypeCacheEntry *typentry);
     290             : static int  dcs_cmp(const void *a, const void *b);
     291             : static void decr_dcc_refcount(DomainConstraintCache *dcc);
     292             : static void dccref_deletion_callback(void *arg);
     293             : static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
     294             : static bool array_element_has_equality(TypeCacheEntry *typentry);
     295             : static bool array_element_has_compare(TypeCacheEntry *typentry);
     296             : static bool array_element_has_hashing(TypeCacheEntry *typentry);
     297             : static bool array_element_has_extended_hashing(TypeCacheEntry *typentry);
     298             : static void cache_array_element_properties(TypeCacheEntry *typentry);
     299             : static bool record_fields_have_equality(TypeCacheEntry *typentry);
     300             : static bool record_fields_have_compare(TypeCacheEntry *typentry);
     301             : static void cache_record_field_properties(TypeCacheEntry *typentry);
     302             : static bool range_element_has_hashing(TypeCacheEntry *typentry);
     303             : static bool range_element_has_extended_hashing(TypeCacheEntry *typentry);
     304             : static void cache_range_element_properties(TypeCacheEntry *typentry);
     305             : static void TypeCacheRelCallback(Datum arg, Oid relid);
     306             : static void TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue);
     307             : static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
     308             : static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
     309             : static void load_enum_cache_data(TypeCacheEntry *tcache);
     310             : static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
     311             : static int  enum_oid_cmp(const void *left, const void *right);
     312             : static void shared_record_typmod_registry_detach(dsm_segment *segment,
     313             :                                                  Datum datum);
     314             : static TupleDesc find_or_make_matching_shared_tupledesc(TupleDesc tupdesc);
     315             : static dsa_pointer share_tupledesc(dsa_area *area, TupleDesc tupdesc,
     316             :                                    uint32 typmod);
     317             : 
     318             : 
     319             : /*
     320             :  * lookup_type_cache
     321             :  *
     322             :  * Fetch the type cache entry for the specified datatype, and make sure that
     323             :  * all the fields requested by bits in 'flags' are valid.
     324             :  *
     325             :  * The result is never NULL --- we will ereport() if the passed type OID is
     326             :  * invalid.  Note however that we may fail to find one or more of the
     327             :  * values requested by 'flags'; the caller needs to check whether the fields
     328             :  * are InvalidOid or not.
     329             :  */
     330             : TypeCacheEntry *
     331      618504 : lookup_type_cache(Oid type_id, int flags)
     332             : {
     333             :     TypeCacheEntry *typentry;
     334             :     bool        found;
     335             : 
     336      618504 :     if (TypeCacheHash == NULL)
     337             :     {
     338             :         /* First time through: initialize the hash table */
     339             :         HASHCTL     ctl;
     340             : 
     341       32424 :         MemSet(&ctl, 0, sizeof(ctl));
     342        2316 :         ctl.keysize = sizeof(Oid);
     343        2316 :         ctl.entrysize = sizeof(TypeCacheEntry);
     344        2316 :         TypeCacheHash = hash_create("Type information cache", 64,
     345             :                                     &ctl, HASH_ELEM | HASH_BLOBS);
     346             : 
     347             :         /* Also set up callbacks for SI invalidations */
     348        2316 :         CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
     349        2316 :         CacheRegisterSyscacheCallback(TYPEOID, TypeCacheTypCallback, (Datum) 0);
     350        2316 :         CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
     351        2316 :         CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);
     352             : 
     353             :         /* Also make sure CacheMemoryContext exists */
     354        2316 :         if (!CacheMemoryContext)
     355           0 :             CreateCacheMemoryContext();
     356             :     }
     357             : 
     358             :     /* Try to look up an existing entry */
     359      618504 :     typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
     360             :                                               (void *) &type_id,
     361             :                                               HASH_FIND, NULL);
     362      618504 :     if (typentry == NULL)
     363             :     {
     364             :         /*
     365             :          * If we didn't find one, we want to make one.  But first look up the
     366             :          * pg_type row, just to make sure we don't make a cache entry for an
     367             :          * invalid type OID.  If the type OID is not valid, present a
     368             :          * user-facing error, since some code paths such as domain_in() allow
     369             :          * this function to be reached with a user-supplied OID.
     370             :          */
     371             :         HeapTuple   tp;
     372             :         Form_pg_type typtup;
     373             : 
     374       22684 :         tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
     375       22684 :         if (!HeapTupleIsValid(tp))
     376           0 :             ereport(ERROR,
     377             :                     (errcode(ERRCODE_UNDEFINED_OBJECT),
     378             :                      errmsg("type with OID %u does not exist", type_id)));
     379       22684 :         typtup = (Form_pg_type) GETSTRUCT(tp);
     380       22684 :         if (!typtup->typisdefined)
     381           0 :             ereport(ERROR,
     382             :                     (errcode(ERRCODE_UNDEFINED_OBJECT),
     383             :                      errmsg("type \"%s\" is only a shell",
     384             :                             NameStr(typtup->typname))));
     385             : 
     386             :         /* Now make the typcache entry */
     387       22684 :         typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
     388             :                                                   (void *) &type_id,
     389             :                                                   HASH_ENTER, &found);
     390             :         Assert(!found);         /* it wasn't there a moment ago */
     391             : 
     392     1383724 :         MemSet(typentry, 0, sizeof(TypeCacheEntry));
     393             : 
     394             :         /* These fields can never change, by definition */
     395       22684 :         typentry->type_id = type_id;
     396       22684 :         typentry->type_id_hash = GetSysCacheHashValue1(TYPEOID,
     397             :                                                        ObjectIdGetDatum(type_id));
     398             : 
     399             :         /* Keep this part in sync with the code below */
     400       22684 :         typentry->typlen = typtup->typlen;
     401       22684 :         typentry->typbyval = typtup->typbyval;
     402       22684 :         typentry->typalign = typtup->typalign;
     403       22684 :         typentry->typstorage = typtup->typstorage;
     404       22684 :         typentry->typtype = typtup->typtype;
     405       22684 :         typentry->typrelid = typtup->typrelid;
     406       22684 :         typentry->typelem = typtup->typelem;
     407       22684 :         typentry->typcollation = typtup->typcollation;
     408       22684 :         typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
     409             : 
     410             :         /* If it's a domain, immediately thread it into the domain cache list */
     411       22684 :         if (typentry->typtype == TYPTYPE_DOMAIN)
     412             :         {
     413        2144 :             typentry->nextDomain = firstDomainTypeEntry;
     414        2144 :             firstDomainTypeEntry = typentry;
     415             :         }
     416             : 
     417       22684 :         ReleaseSysCache(tp);
     418             :     }
     419      595820 :     else if (!(typentry->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
     420             :     {
     421             :         /*
     422             :          * We have an entry, but its pg_type row got changed, so reload the
     423             :          * data obtained directly from pg_type.
     424             :          */
     425             :         HeapTuple   tp;
     426             :         Form_pg_type typtup;
     427             : 
     428         232 :         tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
     429         232 :         if (!HeapTupleIsValid(tp))
     430           0 :             ereport(ERROR,
     431             :                     (errcode(ERRCODE_UNDEFINED_OBJECT),
     432             :                      errmsg("type with OID %u does not exist", type_id)));
     433         232 :         typtup = (Form_pg_type) GETSTRUCT(tp);
     434         232 :         if (!typtup->typisdefined)
     435           0 :             ereport(ERROR,
     436             :                     (errcode(ERRCODE_UNDEFINED_OBJECT),
     437             :                      errmsg("type \"%s\" is only a shell",
     438             :                             NameStr(typtup->typname))));
     439             : 
     440             :         /*
     441             :          * Keep this part in sync with the code above.  Many of these fields
     442             :          * shouldn't ever change, particularly typtype, but copy 'em anyway.
     443             :          */
     444         232 :         typentry->typlen = typtup->typlen;
     445         232 :         typentry->typbyval = typtup->typbyval;
     446         232 :         typentry->typalign = typtup->typalign;
     447         232 :         typentry->typstorage = typtup->typstorage;
     448         232 :         typentry->typtype = typtup->typtype;
     449         232 :         typentry->typrelid = typtup->typrelid;
     450         232 :         typentry->typelem = typtup->typelem;
     451         232 :         typentry->typcollation = typtup->typcollation;
     452         232 :         typentry->flags |= TCFLAGS_HAVE_PG_TYPE_DATA;
     453             : 
     454         232 :         ReleaseSysCache(tp);
     455             :     }
     456             : 
     457             :     /*
     458             :      * Look up opclasses if we haven't already and any dependent info is
     459             :      * requested.
     460             :      */
     461      618504 :     if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR | TYPECACHE_GT_OPR |
     462             :                   TYPECACHE_CMP_PROC |
     463             :                   TYPECACHE_EQ_OPR_FINFO | TYPECACHE_CMP_PROC_FINFO |
     464      407640 :                   TYPECACHE_BTREE_OPFAMILY)) &&
     465      407640 :         !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
     466             :     {
     467             :         Oid         opclass;
     468             : 
     469       19640 :         opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
     470       19640 :         if (OidIsValid(opclass))
     471             :         {
     472       18434 :             typentry->btree_opf = get_opclass_family(opclass);
     473       18434 :             typentry->btree_opintype = get_opclass_input_type(opclass);
     474             :         }
     475             :         else
     476             :         {
     477        1206 :             typentry->btree_opf = typentry->btree_opintype = InvalidOid;
     478             :         }
     479             : 
     480             :         /*
     481             :          * Reset information derived from btree opclass.  Note in particular
     482             :          * that we'll redetermine the eq_opr even if we previously found one;
     483             :          * this matters in case a btree opclass has been added to a type that
     484             :          * previously had only a hash opclass.
     485             :          */
     486       19640 :         typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
     487             :                              TCFLAGS_CHECKED_LT_OPR |
     488             :                              TCFLAGS_CHECKED_GT_OPR |
     489             :                              TCFLAGS_CHECKED_CMP_PROC);
     490       19640 :         typentry->flags |= TCFLAGS_CHECKED_BTREE_OPCLASS;
     491             :     }
     492             : 
     493             :     /*
     494             :      * If we need to look up equality operator, and there's no btree opclass,
     495             :      * force lookup of hash opclass.
     496             :      */
     497      618504 :     if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
     498      394674 :         !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
     499       19504 :         typentry->btree_opf == InvalidOid)
     500        1202 :         flags |= TYPECACHE_HASH_OPFAMILY;
     501             : 
     502      618504 :     if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO |
     503             :                   TYPECACHE_HASH_EXTENDED_PROC |
     504             :                   TYPECACHE_HASH_EXTENDED_PROC_FINFO |
     505      111742 :                   TYPECACHE_HASH_OPFAMILY)) &&
     506      111742 :         !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
     507             :     {
     508             :         Oid         opclass;
     509             : 
     510        9158 :         opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
     511        9158 :         if (OidIsValid(opclass))
     512             :         {
     513        8960 :             typentry->hash_opf = get_opclass_family(opclass);
     514        8960 :             typentry->hash_opintype = get_opclass_input_type(opclass);
     515             :         }
     516             :         else
     517             :         {
     518         198 :             typentry->hash_opf = typentry->hash_opintype = InvalidOid;
     519             :         }
     520             : 
     521             :         /*
     522             :          * Reset information derived from hash opclass.  We do *not* reset the
     523             :          * eq_opr; if we already found one from the btree opclass, that
     524             :          * decision is still good.
     525             :          */
     526        9158 :         typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
     527             :                              TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
     528        9158 :         typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
     529             :     }
     530             : 
     531             :     /*
     532             :      * Look for requested operators and functions, if we haven't already.
     533             :      */
     534      618504 :     if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
     535      394674 :         !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
     536             :     {
     537       19504 :         Oid         eq_opr = InvalidOid;
     538             : 
     539       19504 :         if (typentry->btree_opf != InvalidOid)
     540       18302 :             eq_opr = get_opfamily_member(typentry->btree_opf,
     541             :                                          typentry->btree_opintype,
     542             :                                          typentry->btree_opintype,
     543             :                                          BTEqualStrategyNumber);
     544       19504 :         if (eq_opr == InvalidOid &&
     545        1202 :             typentry->hash_opf != InvalidOid)
     546        1072 :             eq_opr = get_opfamily_member(typentry->hash_opf,
     547             :                                          typentry->hash_opintype,
     548             :                                          typentry->hash_opintype,
     549             :                                          HTEqualStrategyNumber);
     550             : 
     551             :         /*
     552             :          * If the proposed equality operator is array_eq or record_eq, check
     553             :          * to see if the element type or column types support equality.  If
     554             :          * not, array_eq or record_eq would fail at runtime, so we don't want
     555             :          * to report that the type has equality.  (We can omit similar
     556             :          * checking for ranges because ranges can't be created in the first
     557             :          * place unless their subtypes support equality.)
     558             :          */
     559       19504 :         if (eq_opr == ARRAY_EQ_OP &&
     560        3254 :             !array_element_has_equality(typentry))
     561         448 :             eq_opr = InvalidOid;
     562       19056 :         else if (eq_opr == RECORD_EQ_OP &&
     563         100 :                  !record_fields_have_equality(typentry))
     564           8 :             eq_opr = InvalidOid;
     565             : 
     566             :         /* Force update of eq_opr_finfo only if we're changing state */
     567       19504 :         if (typentry->eq_opr != eq_opr)
     568       18630 :             typentry->eq_opr_finfo.fn_oid = InvalidOid;
     569             : 
     570       19504 :         typentry->eq_opr = eq_opr;
     571             : 
     572             :         /*
     573             :          * Reset info about hash functions whenever we pick up new info about
     574             :          * equality operator.  This is so we can ensure that the hash
     575             :          * functions match the operator.
     576             :          */
     577       19504 :         typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC |
     578             :                              TCFLAGS_CHECKED_HASH_EXTENDED_PROC);
     579       19504 :         typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
     580             :     }
     581      618504 :     if ((flags & TYPECACHE_LT_OPR) &&
     582      360620 :         !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
     583             :     {
     584       18656 :         Oid         lt_opr = InvalidOid;
     585             : 
     586       18656 :         if (typentry->btree_opf != InvalidOid)
     587       17708 :             lt_opr = get_opfamily_member(typentry->btree_opf,
     588             :                                          typentry->btree_opintype,
     589             :                                          typentry->btree_opintype,
     590             :                                          BTLessStrategyNumber);
     591             : 
     592             :         /*
     593             :          * As above, make sure array_cmp or record_cmp will succeed; but again
     594             :          * we need no special check for ranges.
     595             :          */
     596       18656 :         if (lt_opr == ARRAY_LT_OP &&
     597        3244 :             !array_element_has_compare(typentry))
     598         900 :             lt_opr = InvalidOid;
     599       17756 :         else if (lt_opr == RECORD_LT_OP &&
     600          78 :                  !record_fields_have_compare(typentry))
     601           8 :             lt_opr = InvalidOid;
     602             : 
     603       18656 :         typentry->lt_opr = lt_opr;
     604       18656 :         typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
     605             :     }
     606      618504 :     if ((flags & TYPECACHE_GT_OPR) &&
     607      359108 :         !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
     608             :     {
     609       18632 :         Oid         gt_opr = InvalidOid;
     610             : 
     611       18632 :         if (typentry->btree_opf != InvalidOid)
     612       17684 :             gt_opr = get_opfamily_member(typentry->btree_opf,
     613             :                                          typentry->btree_opintype,
     614             :                                          typentry->btree_opintype,
     615             :                                          BTGreaterStrategyNumber);
     616             : 
     617             :         /*
     618             :          * As above, make sure array_cmp or record_cmp will succeed; but again
     619             :          * we need no special check for ranges.
     620             :          */
     621       18632 :         if (gt_opr == ARRAY_GT_OP &&
     622        3238 :             !array_element_has_compare(typentry))
     623         900 :             gt_opr = InvalidOid;
     624       17732 :         else if (gt_opr == RECORD_GT_OP &&
     625          78 :                  !record_fields_have_compare(typentry))
     626           8 :             gt_opr = InvalidOid;
     627             : 
     628       18632 :         typentry->gt_opr = gt_opr;
     629       18632 :         typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
     630             :     }
     631      618504 :     if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
     632       28042 :         !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
     633             :     {
     634        2678 :         Oid         cmp_proc = InvalidOid;
     635             : 
     636        2678 :         if (typentry->btree_opf != InvalidOid)
     637        2218 :             cmp_proc = get_opfamily_proc(typentry->btree_opf,
     638             :                                          typentry->btree_opintype,
     639             :                                          typentry->btree_opintype,
     640             :                                          BTORDER_PROC);
     641             : 
     642             :         /*
     643             :          * As above, make sure array_cmp or record_cmp will succeed; but again
     644             :          * we need no special check for ranges.
     645             :          */
     646        2678 :         if (cmp_proc == F_BTARRAYCMP &&
     647          52 :             !array_element_has_compare(typentry))
     648           0 :             cmp_proc = InvalidOid;
     649        2678 :         else if (cmp_proc == F_BTRECORDCMP &&
     650          46 :                  !record_fields_have_compare(typentry))
     651           0 :             cmp_proc = InvalidOid;
     652             : 
     653             :         /* Force update of cmp_proc_finfo only if we're changing state */
     654        2678 :         if (typentry->cmp_proc != cmp_proc)
     655        2198 :             typentry->cmp_proc_finfo.fn_oid = InvalidOid;
     656             : 
     657        2678 :         typentry->cmp_proc = cmp_proc;
     658        2678 :         typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
     659             :     }
     660      618504 :     if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) &&
     661      110972 :         !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
     662             :     {
     663        8416 :         Oid         hash_proc = InvalidOid;
     664             : 
     665             :         /*
     666             :          * We insist that the eq_opr, if one has been determined, match the
     667             :          * hash opclass; else report there is no hash function.
     668             :          */
     669        8416 :         if (typentry->hash_opf != InvalidOid &&
     670       16684 :             (!OidIsValid(typentry->eq_opr) ||
     671        8336 :              typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
     672             :                                                      typentry->hash_opintype,
     673             :                                                      typentry->hash_opintype,
     674             :                                                      HTEqualStrategyNumber)))
     675        8348 :             hash_proc = get_opfamily_proc(typentry->hash_opf,
     676             :                                           typentry->hash_opintype,
     677             :                                           typentry->hash_opintype,
     678             :                                           HASHSTANDARD_PROC);
     679             : 
     680             :         /*
     681             :          * As above, make sure hash_array will succeed.  We don't currently
     682             :          * support hashing for composite types, but when we do, we'll need
     683             :          * more logic here to check that case too.
     684             :          */
     685        8416 :         if (hash_proc == F_HASH_ARRAY &&
     686          28 :             !array_element_has_hashing(typentry))
     687           4 :             hash_proc = InvalidOid;
     688             : 
     689             :         /*
     690             :          * Likewise for hash_range.
     691             :          */
     692        8416 :         if (hash_proc == F_HASH_RANGE &&
     693          12 :             !range_element_has_hashing(typentry))
     694           4 :             hash_proc = InvalidOid;
     695             : 
     696             :         /* Force update of hash_proc_finfo only if we're changing state */
     697        8416 :         if (typentry->hash_proc != hash_proc)
     698        8088 :             typentry->hash_proc_finfo.fn_oid = InvalidOid;
     699             : 
     700        8416 :         typentry->hash_proc = hash_proc;
     701        8416 :         typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
     702             :     }
     703      618504 :     if ((flags & (TYPECACHE_HASH_EXTENDED_PROC |
     704        2882 :                   TYPECACHE_HASH_EXTENDED_PROC_FINFO)) &&
     705        2882 :         !(typentry->flags & TCFLAGS_CHECKED_HASH_EXTENDED_PROC))
     706             :     {
     707        2410 :         Oid         hash_extended_proc = InvalidOid;
     708             : 
     709             :         /*
     710             :          * We insist that the eq_opr, if one has been determined, match the
     711             :          * hash opclass; else report there is no hash function.
     712             :          */
     713        2410 :         if (typentry->hash_opf != InvalidOid &&
     714        4776 :             (!OidIsValid(typentry->eq_opr) ||
     715        2382 :              typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
     716             :                                                      typentry->hash_opintype,
     717             :                                                      typentry->hash_opintype,
     718             :                                                      HTEqualStrategyNumber)))
     719        2394 :             hash_extended_proc = get_opfamily_proc(typentry->hash_opf,
     720             :                                                    typentry->hash_opintype,
     721             :                                                    typentry->hash_opintype,
     722             :                                                    HASHEXTENDED_PROC);
     723             : 
     724             :         /*
     725             :          * As above, make sure hash_array_extended will succeed.  We don't
     726             :          * currently support hashing for composite types, but when we do,
     727             :          * we'll need more logic here to check that case too.
     728             :          */
     729        2410 :         if (hash_extended_proc == F_HASH_ARRAY_EXTENDED &&
     730           0 :             !array_element_has_extended_hashing(typentry))
     731           0 :             hash_extended_proc = InvalidOid;
     732             : 
     733             :         /*
     734             :          * Likewise for hash_range_extended.
     735             :          */
     736        2410 :         if (hash_extended_proc == F_HASH_RANGE_EXTENDED &&
     737           0 :             !range_element_has_extended_hashing(typentry))
     738           0 :             hash_extended_proc = InvalidOid;
     739             : 
     740             :         /* Force update of proc finfo only if we're changing state */
     741        2410 :         if (typentry->hash_extended_proc != hash_extended_proc)
     742        2394 :             typentry->hash_extended_proc_finfo.fn_oid = InvalidOid;
     743             : 
     744        2410 :         typentry->hash_extended_proc = hash_extended_proc;
     745        2410 :         typentry->flags |= TCFLAGS_CHECKED_HASH_EXTENDED_PROC;
     746             :     }
     747             : 
     748             :     /*
     749             :      * Set up fmgr lookup info as requested
     750             :      *
     751             :      * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
     752             :      * which is not quite right (they're really in the hash table's private
     753             :      * memory context) but this will do for our purposes.
     754             :      *
     755             :      * Note: the code above avoids invalidating the finfo structs unless the
     756             :      * referenced operator/function OID actually changes.  This is to prevent
     757             :      * unnecessary leakage of any subsidiary data attached to an finfo, since
     758             :      * that would cause session-lifespan memory leaks.
     759             :      */
     760      618504 :     if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
     761        4464 :         typentry->eq_opr_finfo.fn_oid == InvalidOid &&
     762        1014 :         typentry->eq_opr != InvalidOid)
     763             :     {
     764             :         Oid         eq_opr_func;
     765             : 
     766        1010 :         eq_opr_func = get_opcode(typentry->eq_opr);
     767        1010 :         if (eq_opr_func != InvalidOid)
     768        1010 :             fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
     769             :                           CacheMemoryContext);
     770             :     }
     771      618504 :     if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
     772       22534 :         typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
     773        7728 :         typentry->cmp_proc != InvalidOid)
     774             :     {
     775        2004 :         fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
     776             :                       CacheMemoryContext);
     777             :     }
     778      618504 :     if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
     779       17384 :         typentry->hash_proc_finfo.fn_oid == InvalidOid &&
     780        2326 :         typentry->hash_proc != InvalidOid)
     781             :     {
     782        2326 :         fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
     783             :                       CacheMemoryContext);
     784             :     }
     785      618504 :     if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) &&
     786          28 :         typentry->hash_extended_proc_finfo.fn_oid == InvalidOid &&
     787           8 :         typentry->hash_extended_proc != InvalidOid)
     788             :     {
     789           8 :         fmgr_info_cxt(typentry->hash_extended_proc,
     790             :                       &typentry->hash_extended_proc_finfo,
     791             :                       CacheMemoryContext);
     792             :     }
     793             : 
     794             :     /*
     795             :      * If it's a composite type (row type), get tupdesc if requested
     796             :      */
     797      618504 :     if ((flags & TYPECACHE_TUPDESC) &&
     798       55390 :         typentry->tupDesc == NULL &&
     799        2708 :         typentry->typtype == TYPTYPE_COMPOSITE)
     800             :     {
     801        2594 :         load_typcache_tupdesc(typentry);
     802             :     }
     803             : 
     804             :     /*
     805             :      * If requested, get information about a range type
     806             :      *
     807             :      * This includes making sure that the basic info about the range element
     808             :      * type is up-to-date.
     809             :      */
     810      618504 :     if ((flags & TYPECACHE_RANGE_INFO) &&
     811        4514 :         typentry->typtype == TYPTYPE_RANGE)
     812             :     {
     813        4514 :         if (typentry->rngelemtype == NULL)
     814         208 :             load_rangetype_info(typentry);
     815        4306 :         else if (!(typentry->rngelemtype->flags & TCFLAGS_HAVE_PG_TYPE_DATA))
     816           0 :             (void) lookup_type_cache(typentry->rngelemtype->type_id, 0);
     817             :     }
     818             : 
     819             :     /*
     820             :      * If requested, get information about a domain type
     821             :      */
     822      618504 :     if ((flags & TYPECACHE_DOMAIN_BASE_INFO) &&
     823        7208 :         typentry->domainBaseType == InvalidOid &&
     824        4084 :         typentry->typtype == TYPTYPE_DOMAIN)
     825             :     {
     826         860 :         typentry->domainBaseTypmod = -1;
     827         860 :         typentry->domainBaseType =
     828         860 :             getBaseTypeAndTypmod(type_id, &typentry->domainBaseTypmod);
     829             :     }
     830      618504 :     if ((flags & TYPECACHE_DOMAIN_CONSTR_INFO) &&
     831      110382 :         (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
     832        4630 :         typentry->typtype == TYPTYPE_DOMAIN)
     833             :     {
     834        2478 :         load_domaintype_info(typentry);
     835             :     }
     836             : 
     837      618504 :     return typentry;
     838             : }
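Closing the loop on the function's contract, here is a second hedged sketch (hypothetical helper, not part of typcache.c) of the pattern routines such as array_cmp and record_cmp rely on: request the comparison-function FmgrInfo once, check that it was found, and call it through fmgr.

    /*
     * Illustrative sketch only: compare two datums of the given type via
     * the cached btree comparison function.  The helper name and error
     * text are hypothetical.
     */
    static int32
    example_compare_datums(Oid typid, Oid collation, Datum a, Datum b)
    {
        TypeCacheEntry *typentry;

        typentry = lookup_type_cache(typid, TYPECACHE_CMP_PROC_FINFO);
        if (!OidIsValid(typentry->cmp_proc_finfo.fn_oid))
            elog(ERROR, "could not identify a comparison function for type %u",
                 typid);

        return DatumGetInt32(FunctionCall2Coll(&typentry->cmp_proc_finfo,
                                               collation, a, b));
    }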
     839             : 
     840             : /*
     841             :  * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
     842             :  */
     843             : static void
     844        2636 : load_typcache_tupdesc(TypeCacheEntry *typentry)
     845             : {
     846             :     Relation    rel;
     847             : 
     848        2636 :     if (!OidIsValid(typentry->typrelid)) /* should not happen */
     849           0 :         elog(ERROR, "invalid typrelid for composite type %u",
     850             :              typentry->type_id);
     851        2636 :     rel = relation_open(typentry->typrelid, AccessShareLock);
     852             :     Assert(rel->rd_rel->reltype == typentry->type_id);
     853             : 
     854             :     /*
     855             :      * Link to the tupdesc and increment its refcount (we assert it's a
     856             :      * refcounted descriptor).  We don't use IncrTupleDescRefCount() for this,
     857             :      * because the reference mustn't be entered in the current resource owner;
     858             :      * it can outlive the current query.
     859             :      */
     860        2636 :     typentry->tupDesc = RelationGetDescr(rel);
     861             : 
     862             :     Assert(typentry->tupDesc->tdrefcount > 0);
     863        2636 :     typentry->tupDesc->tdrefcount++;
     864             : 
     865             :     /*
     866             :      * In future, we could take some pains to not change tupDesc_identifier if
     867             :      * the tupdesc didn't really change; but for now it's not worth it.
     868             :      */
     869        2636 :     typentry->tupDesc_identifier = ++tupledesc_id_counter;
     870             : 
     871        2636 :     relation_close(rel, AccessShareLock);
     872        2636 : }
     873             : 
     874             : /*
     875             :  * load_rangetype_info --- helper routine to set up range type information
     876             :  */
     877             : static void
     878         208 : load_rangetype_info(TypeCacheEntry *typentry)
     879             : {
     880             :     Form_pg_range pg_range;
     881             :     HeapTuple   tup;
     882             :     Oid         subtypeOid;
     883             :     Oid         opclassOid;
     884             :     Oid         canonicalOid;
     885             :     Oid         subdiffOid;
     886             :     Oid         opfamilyOid;
     887             :     Oid         opcintype;
     888             :     Oid         cmpFnOid;
     889             : 
     890             :     /* get information from pg_range */
     891         208 :     tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id));
     892             :     /* should not fail, since we already checked typtype ... */
     893         208 :     if (!HeapTupleIsValid(tup))
     894           0 :         elog(ERROR, "cache lookup failed for range type %u",
     895             :              typentry->type_id);
     896         208 :     pg_range = (Form_pg_range) GETSTRUCT(tup);
     897             : 
     898         208 :     subtypeOid = pg_range->rngsubtype;
     899         208 :     typentry->rng_collation = pg_range->rngcollation;
     900         208 :     opclassOid = pg_range->rngsubopc;
     901         208 :     canonicalOid = pg_range->rngcanonical;
     902         208 :     subdiffOid = pg_range->rngsubdiff;
     903             : 
     904         208 :     ReleaseSysCache(tup);
     905             : 
     906             :     /* get opclass properties and look up the comparison function */
     907         208 :     opfamilyOid = get_opclass_family(opclassOid);
     908         208 :     opcintype = get_opclass_input_type(opclassOid);
     909             : 
     910         208 :     cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
     911             :                                  BTORDER_PROC);
     912         208 :     if (!RegProcedureIsValid(cmpFnOid))
     913           0 :         elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
     914             :              BTORDER_PROC, opcintype, opcintype, opfamilyOid);
     915             : 
     916             :     /* set up cached fmgrinfo structs */
     917         208 :     fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
     918             :                   CacheMemoryContext);
     919         208 :     if (OidIsValid(canonicalOid))
     920          86 :         fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
     921             :                       CacheMemoryContext);
     922         208 :     if (OidIsValid(subdiffOid))
     923         158 :         fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
     924             :                       CacheMemoryContext);
     925             : 
     926             :     /* Lastly, set up link to the element type --- this marks data valid */
     927         208 :     typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
     928         208 : }
     929             : 
     930             : 
     931             : /*
     932             :  * load_domaintype_info --- helper routine to set up domain constraint info
     933             :  *
     934             :  * Note: we assume we're called in a relatively short-lived context, so it's
     935             :  * okay to leak data into the current context while scanning pg_constraint.
     936             :  * We build the new DomainConstraintCache data in a context underneath
     937             :  * CurrentMemoryContext, and reparent it under CacheMemoryContext when
     938             :  * complete.
     939             :  */
     940             : static void
     941        2478 : load_domaintype_info(TypeCacheEntry *typentry)
     942             : {
     943        2478 :     Oid         typeOid = typentry->type_id;
     944             :     DomainConstraintCache *dcc;
     945        2478 :     bool        notNull = false;
     946             :     DomainConstraintState **ccons;
     947             :     int         cconslen;
     948             :     Relation    conRel;
     949             :     MemoryContext oldcxt;
     950             : 
     951             :     /*
     952             :      * If we're here, any existing constraint info is stale, so release it.
     953             :      * For safety, be sure to null the link before trying to delete the data.
     954             :      */
     955        2478 :     if (typentry->domainData)
     956             :     {
     957         308 :         dcc = typentry->domainData;
     958         308 :         typentry->domainData = NULL;
     959         308 :         decr_dcc_refcount(dcc);
     960             :     }
     961             : 
     962             :     /*
     963             :      * We try to optimize the common case of no domain constraints, so don't
     964             :      * create the dcc object and context until we find a constraint.  Likewise
     965             :      * for the temp sorting array.
     966             :      */
     967        2478 :     dcc = NULL;
     968        2478 :     ccons = NULL;
     969        2478 :     cconslen = 0;
     970             : 
     971             :     /*
     972             :      * Scan pg_constraint for relevant constraints.  We want to find
     973             :      * constraints for not just this domain, but any ancestor domains, so the
     974             :      * outer loop crawls up the domain stack.
     975             :      */
     976        2478 :     conRel = table_open(ConstraintRelationId, AccessShareLock);
     977             : 
     978             :     for (;;)
     979        2514 :     {
     980             :         HeapTuple   tup;
     981             :         HeapTuple   conTup;
     982             :         Form_pg_type typTup;
     983        4992 :         int         nccons = 0;
     984             :         ScanKeyData key[1];
     985             :         SysScanDesc scan;
     986             : 
     987        4992 :         tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
     988        4992 :         if (!HeapTupleIsValid(tup))
     989           0 :             elog(ERROR, "cache lookup failed for type %u", typeOid);
     990        4992 :         typTup = (Form_pg_type) GETSTRUCT(tup);
     991             : 
     992        4992 :         if (typTup->typtype != TYPTYPE_DOMAIN)
     993             :         {
     994             :             /* Not a domain, so done */
     995        2478 :             ReleaseSysCache(tup);
     996        2478 :             break;
     997             :         }
     998             : 
      999             :             /* Test for NOT NULL constraint */
    1000        2514 :         if (typTup->typnotnull)
    1001          72 :             notNull = true;
    1002             : 
     1003             :             /* Look for CHECK constraints on this domain */
    1004        2514 :         ScanKeyInit(&key[0],
    1005             :                     Anum_pg_constraint_contypid,
    1006             :                     BTEqualStrategyNumber, F_OIDEQ,
    1007             :                     ObjectIdGetDatum(typeOid));
    1008             : 
    1009        2514 :         scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
    1010             :                                   NULL, 1, key);
    1011             : 
    1012        3728 :         while (HeapTupleIsValid(conTup = systable_getnext(scan)))
    1013             :         {
    1014        1214 :             Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
    1015             :             Datum       val;
    1016             :             bool        isNull;
    1017             :             char       *constring;
    1018             :             Expr       *check_expr;
    1019             :             DomainConstraintState *r;
    1020             : 
    1021             :             /* Ignore non-CHECK constraints (presently, shouldn't be any) */
    1022        1214 :             if (c->contype != CONSTRAINT_CHECK)
    1023           0 :                 continue;
    1024             : 
    1025             :             /* Not expecting conbin to be NULL, but we'll test for it anyway */
    1026        1214 :             val = fastgetattr(conTup, Anum_pg_constraint_conbin,
    1027             :                               conRel->rd_att, &isNull);
    1028        1214 :             if (isNull)
    1029           0 :                 elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
    1030             :                      NameStr(typTup->typname), NameStr(c->conname));
    1031             : 
    1032             :             /* Convert conbin to C string in caller context */
    1033        1214 :             constring = TextDatumGetCString(val);
    1034             : 
    1035             :             /* Create the DomainConstraintCache object and context if needed */
    1036        1214 :             if (dcc == NULL)
    1037             :             {
    1038             :                 MemoryContext cxt;
    1039             : 
    1040        1186 :                 cxt = AllocSetContextCreate(CurrentMemoryContext,
    1041             :                                             "Domain constraints",
    1042             :                                             ALLOCSET_SMALL_SIZES);
    1043             :                 dcc = (DomainConstraintCache *)
    1044        1186 :                     MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
    1045        1186 :                 dcc->constraints = NIL;
    1046        1186 :                 dcc->dccContext = cxt;
    1047        1186 :                 dcc->dccRefCount = 0;
    1048             :             }
    1049             : 
    1050             :             /* Create node trees in DomainConstraintCache's context */
    1051        1214 :             oldcxt = MemoryContextSwitchTo(dcc->dccContext);
    1052             : 
    1053        1214 :             check_expr = (Expr *) stringToNode(constring);
    1054             : 
    1055             :             /*
    1056             :              * Plan the expression, since ExecInitExpr will expect that.
    1057             :              *
    1058             :              * Note: caching the result of expression_planner() is not very
    1059             :              * good practice.  Ideally we'd use a CachedExpression here so
    1060             :              * that we would react promptly to, eg, changes in inlined
    1061             :              * functions.  However, because we don't support mutable domain
    1062             :              * CHECK constraints, it's not really clear that it's worth the
    1063             :              * extra overhead to do that.
    1064             :              */
    1065        1214 :             check_expr = expression_planner(check_expr);
    1066             : 
    1067        1214 :             r = makeNode(DomainConstraintState);
    1068        1214 :             r->constrainttype = DOM_CONSTRAINT_CHECK;
    1069        1214 :             r->name = pstrdup(NameStr(c->conname));
    1070        1214 :             r->check_expr = check_expr;
    1071        1214 :             r->check_exprstate = NULL;
    1072             : 
    1073        1214 :             MemoryContextSwitchTo(oldcxt);
    1074             : 
    1075             :             /* Accumulate constraints in an array, for sorting below */
    1076        1214 :             if (ccons == NULL)
    1077             :             {
    1078        1186 :                 cconslen = 8;
    1079             :                 ccons = (DomainConstraintState **)
    1080        1186 :                     palloc(cconslen * sizeof(DomainConstraintState *));
    1081             :             }
    1082          28 :             else if (nccons >= cconslen)
    1083             :             {
    1084           0 :                 cconslen *= 2;
    1085             :                 ccons = (DomainConstraintState **)
    1086           0 :                     repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
    1087             :             }
    1088        1214 :             ccons[nccons++] = r;
    1089             :         }
    1090             : 
    1091        2514 :         systable_endscan(scan);
    1092             : 
    1093        2514 :         if (nccons > 0)
    1094             :         {
    1095             :             /*
    1096             :              * Sort the items for this domain, so that CHECKs are applied in a
    1097             :              * deterministic order.
    1098             :              */
    1099        1204 :             if (nccons > 1)
    1100           8 :                 qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);
    1101             : 
    1102             :             /*
    1103             :              * Now attach them to the overall list.  Use lcons() here because
    1104             :              * constraints of parent domains should be applied earlier.
    1105             :              */
    1106        1204 :             oldcxt = MemoryContextSwitchTo(dcc->dccContext);
    1107        2418 :             while (nccons > 0)
    1108        1214 :                 dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
    1109        1204 :             MemoryContextSwitchTo(oldcxt);
    1110             :         }
    1111             : 
    1112             :         /* loop to next domain in stack */
    1113        2514 :         typeOid = typTup->typbasetype;
    1114        2514 :         ReleaseSysCache(tup);
    1115             :     }
    1116             : 
    1117        2478 :     table_close(conRel, AccessShareLock);
    1118             : 
    1119             :     /*
    1120             :      * Only need to add one NOT NULL check regardless of how many domains in
    1121             :      * the stack request it.
    1122             :      */
    1123        2478 :     if (notNull)
    1124             :     {
    1125             :         DomainConstraintState *r;
    1126             : 
    1127             :         /* Create the DomainConstraintCache object and context if needed */
    1128          72 :         if (dcc == NULL)
    1129             :         {
    1130             :             MemoryContext cxt;
    1131             : 
    1132          52 :             cxt = AllocSetContextCreate(CurrentMemoryContext,
    1133             :                                         "Domain constraints",
    1134             :                                         ALLOCSET_SMALL_SIZES);
    1135             :             dcc = (DomainConstraintCache *)
    1136          52 :                 MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
    1137          52 :             dcc->constraints = NIL;
    1138          52 :             dcc->dccContext = cxt;
    1139          52 :             dcc->dccRefCount = 0;
    1140             :         }
    1141             : 
    1142             :         /* Create node trees in DomainConstraintCache's context */
    1143          72 :         oldcxt = MemoryContextSwitchTo(dcc->dccContext);
    1144             : 
    1145          72 :         r = makeNode(DomainConstraintState);
    1146             : 
    1147          72 :         r->constrainttype = DOM_CONSTRAINT_NOTNULL;
    1148          72 :         r->name = pstrdup("NOT NULL");
    1149          72 :         r->check_expr = NULL;
    1150          72 :         r->check_exprstate = NULL;
    1151             : 
    1152             :         /* lcons to apply the nullness check FIRST */
    1153          72 :         dcc->constraints = lcons(r, dcc->constraints);
    1154             : 
    1155          72 :         MemoryContextSwitchTo(oldcxt);
    1156             :     }
    1157             : 
    1158             :     /*
    1159             :      * If we made a constraint object, move it into CacheMemoryContext and
    1160             :      * attach it to the typcache entry.
    1161             :      */
    1162        2478 :     if (dcc)
    1163             :     {
    1164        1238 :         MemoryContextSetParent(dcc->dccContext, CacheMemoryContext);
    1165        1238 :         typentry->domainData = dcc;
    1166        1238 :         dcc->dccRefCount++;      /* count the typcache's reference */
    1167             :     }
    1168             : 
    1169             :     /* Either way, the typcache entry's domain data is now valid. */
    1170        2478 :     typentry->flags |= TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
    1171        2478 : }
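
/*
 * Editor's note: the build-then-reparent strategy described in the header
 * comment of load_domaintype_info(), shown in isolation.  This is a minimal
 * sketch, not part of typcache.c; build_then_promote() and its argument are
 * hypothetical.  It assumes the usual headers (postgres.h, utils/memutils.h).
 */
#ifdef TYPCACHE_EDITOR_SKETCH
static void *
build_then_promote(Size nbytes)
{
    /* Child of CurrentMemoryContext: an error during the build frees it */
    MemoryContext cxt = AllocSetContextCreate(CurrentMemoryContext,
                                              "scratch build context",
                                              ALLOCSET_SMALL_SIZES);
    void       *result = MemoryContextAllocZero(cxt, nbytes);

    /* ... fill in *result; any leaks here stay confined to cxt ... */

    /* Only a fully built structure becomes long-lived */
    MemoryContextSetParent(cxt, CacheMemoryContext);
    return result;
}
#endif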
    1172             : 
    1173             : /*
    1174             :  * qsort comparator to sort DomainConstraintState pointers by name
    1175             :  */
    1176             : static int
    1177          10 : dcs_cmp(const void *a, const void *b)
    1178             : {
    1179          10 :     const DomainConstraintState *const *ca = (const DomainConstraintState *const *) a;
    1180          10 :     const DomainConstraintState *const *cb = (const DomainConstraintState *const *) b;
    1181             : 
    1182          10 :     return strcmp((*ca)->name, (*cb)->name);
    1183             : }
    1184             : 
    1185             : /*
    1186             :  * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
    1187             :  * and free it if no references remain
    1188             :  */
    1189             : static void
    1190       28626 : decr_dcc_refcount(DomainConstraintCache *dcc)
    1191             : {
    1192             :     Assert(dcc->dccRefCount > 0);
    1193       28626 :     if (--(dcc->dccRefCount) <= 0)
    1194         304 :         MemoryContextDelete(dcc->dccContext);
    1195       28626 : }
    1196             : 
    1197             : /*
    1198             :  * Context reset/delete callback for a DomainConstraintRef
    1199             :  */
    1200             : static void
    1201       30248 : dccref_deletion_callback(void *arg)
    1202             : {
    1203       30248 :     DomainConstraintRef *ref = (DomainConstraintRef *) arg;
    1204       30248 :     DomainConstraintCache *dcc = ref->dcc;
    1205             : 
    1206             :     /* Paranoia --- be sure link is nulled before trying to release */
    1207       30248 :     if (dcc)
    1208             :     {
    1209       28318 :         ref->constraints = NIL;
    1210       28318 :         ref->dcc = NULL;
    1211       28318 :         decr_dcc_refcount(dcc);
    1212             :     }
    1213       30248 : }
    1214             : 
    1215             : /*
    1216             :  * prep_domain_constraints --- prepare domain constraints for execution
    1217             :  *
    1218             :  * The expression trees stored in the DomainConstraintCache's list are
    1219             :  * converted to executable expression state trees stored in execctx.
    1220             :  */
    1221             : static List *
    1222        1926 : prep_domain_constraints(List *constraints, MemoryContext execctx)
    1223             : {
    1224        1926 :     List       *result = NIL;
    1225             :     MemoryContext oldcxt;
    1226             :     ListCell   *lc;
    1227             : 
    1228        1926 :     oldcxt = MemoryContextSwitchTo(execctx);
    1229             : 
    1230        3868 :     foreach(lc, constraints)
    1231             :     {
    1232        1942 :         DomainConstraintState *r = (DomainConstraintState *) lfirst(lc);
    1233             :         DomainConstraintState *newr;
    1234             : 
    1235        1942 :         newr = makeNode(DomainConstraintState);
    1236        1942 :         newr->constrainttype = r->constrainttype;
    1237        1942 :         newr->name = r->name;
    1238        1942 :         newr->check_expr = r->check_expr;
    1239        1942 :         newr->check_exprstate = ExecInitExpr(r->check_expr, NULL);
    1240             : 
    1241        1942 :         result = lappend(result, newr);
    1242             :     }
    1243             : 
    1244        1926 :     MemoryContextSwitchTo(oldcxt);
    1245             : 
    1246        1926 :     return result;
    1247             : }
    1248             : 
    1249             : /*
    1250             :  * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
    1251             :  *
    1252             :  * Caller must tell us the MemoryContext in which the DomainConstraintRef
    1253             :  * lives.  The ref will be cleaned up when that context is reset/deleted.
    1254             :  *
    1255             :  * Caller must also tell us whether it wants check_exprstate fields to be
    1256             :  * computed in the DomainConstraintState nodes attached to this ref.
    1257             :  * If it doesn't, we need not make a copy of the DomainConstraintState list.
    1258             :  */
    1259             : void
    1260       30276 : InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref,
    1261             :                         MemoryContext refctx, bool need_exprstate)
    1262             : {
    1263             :     /* Look up the typcache entry --- we assume it survives indefinitely */
    1264       30276 :     ref->tcache = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
    1265       30276 :     ref->need_exprstate = need_exprstate;
    1266             :     /* For safety, establish the callback before acquiring a refcount */
    1267       30276 :     ref->refctx = refctx;
    1268       30276 :     ref->dcc = NULL;
    1269       30276 :     ref->callback.func = dccref_deletion_callback;
    1270       30276 :     ref->callback.arg = (void *) ref;
    1271       30276 :     MemoryContextRegisterResetCallback(refctx, &ref->callback);
    1272             :     /* Acquire refcount if there are constraints, and set up exported list */
    1273       30276 :     if (ref->tcache->domainData)
    1274             :     {
    1275       28346 :         ref->dcc = ref->tcache->domainData;
    1276       28346 :         ref->dcc->dccRefCount++;
    1277       28346 :         if (ref->need_exprstate)
    1278        1926 :             ref->constraints = prep_domain_constraints(ref->dcc->constraints,
    1279             :                                                        ref->refctx);
    1280             :         else
    1281       26420 :             ref->constraints = ref->dcc->constraints;
    1282             :     }
    1283             :     else
    1284        1930 :         ref->constraints = NIL;
    1285       30276 : }
    1286             : 
    1287             : /*
    1288             :  * UpdateDomainConstraintRef --- recheck validity of domain constraint info
    1289             :  *
    1290             :  * If the domain's constraint set changed, ref->constraints is updated to
    1291             :  * point at a new list of cached constraints.
    1292             :  *
    1293             :  * In the normal case where nothing happened to the domain, this is cheap
    1294             :  * enough that it's reasonable (and expected) to check before *each* use
    1295             :  * of the constraint info.
    1296             :  */
    1297             : void
    1298     1531720 : UpdateDomainConstraintRef(DomainConstraintRef *ref)
    1299             : {
    1300     1531720 :     TypeCacheEntry *typentry = ref->tcache;
    1301             : 
    1302             :     /* Make sure typcache entry's data is up to date */
    1303     1531720 :     if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
    1304           0 :         typentry->typtype == TYPTYPE_DOMAIN)
    1305           0 :         load_domaintype_info(typentry);
    1306             : 
    1307             :     /* Transfer to ref object if there's new info, adjusting refcounts */
    1308     1531720 :     if (ref->dcc != typentry->domainData)
    1309             :     {
    1310             :         /* Paranoia --- be sure link is nulled before trying to release */
    1311           0 :         DomainConstraintCache *dcc = ref->dcc;
    1312             : 
    1313           0 :         if (dcc)
    1314             :         {
    1315             :             /*
    1316             :              * Note: we just leak the previous list of executable domain
    1317             :              * constraints.  Alternatively, we could keep those in a child
    1318             :              * context of ref->refctx and free that context at this point.
    1319             :              * However, in practice this code path will be taken so seldom
    1320             :              * that the extra bookkeeping for a child context doesn't seem
    1321             :              * worthwhile; we'll just allow a leak for the lifespan of refctx.
    1322             :              */
    1323           0 :             ref->constraints = NIL;
    1324           0 :             ref->dcc = NULL;
    1325           0 :             decr_dcc_refcount(dcc);
    1326             :         }
    1327           0 :         dcc = typentry->domainData;
    1328           0 :         if (dcc)
    1329             :         {
    1330           0 :             ref->dcc = dcc;
    1331           0 :             dcc->dccRefCount++;
    1332           0 :             if (ref->need_exprstate)
    1333           0 :                 ref->constraints = prep_domain_constraints(dcc->constraints,
    1334             :                                                            ref->refctx);
    1335             :             else
    1336           0 :                 ref->constraints = dcc->constraints;
    1337             :         }
    1338             :     }
    1339     1531720 : }
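
/*
 * Editor's note: a minimal caller-side sketch (not part of typcache.c) of the
 * DomainConstraintRef lifecycle documented above: one-time setup, then a
 * cheap revalidation before each use.  setup_domain_check() and
 * apply_domain_check() are hypothetical names; only the typcache calls and
 * struct fields are real.  Assumes postgres.h, nodes/execnodes.h,
 * nodes/pg_list.h, and utils/typcache.h.
 */
#ifdef TYPCACHE_EDITOR_SKETCH
static DomainConstraintRef *
setup_domain_check(Oid domain_type_id, MemoryContext long_lived_cxt)
{
    DomainConstraintRef *ref;

    /* The ref must live in (and is cleaned up with) long_lived_cxt */
    ref = MemoryContextAlloc(long_lived_cxt, sizeof(DomainConstraintRef));
    InitDomainConstraintRef(domain_type_id, ref, long_lived_cxt, false);
    return ref;
}

static void
apply_domain_check(DomainConstraintRef *ref)
{
    ListCell   *lc;

    /* Cheap revalidation; expected before each use of the constraint list */
    UpdateDomainConstraintRef(ref);

    foreach(lc, ref->constraints)
    {
        DomainConstraintState *con = (DomainConstraintState *) lfirst(lc);

        /* e.g., switch on con->constrainttype and evaluate con->check_expr */
        (void) con;
    }
}
#endif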
    1340             : 
    1341             : /*
    1342             :  * DomainHasConstraints --- utility routine to check if a domain has constraints
    1343             :  *
    1344             :  * This is defined to return false, not fail, if type is not a domain.
    1345             :  */
    1346             : bool
    1347       80106 : DomainHasConstraints(Oid type_id)
    1348             : {
    1349             :     TypeCacheEntry *typentry;
    1350             : 
    1351             :     /*
    1352             :      * Note: a side effect is to cause the typcache's domain data to become
    1353             :      * valid.  This is fine since we'll likely need it soon if there is any.
    1354             :      */
    1355       80106 :     typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_CONSTR_INFO);
    1356             : 
    1357       80106 :     return (typentry->domainData != NULL);
    1358             : }
    1359             : 
    1360             : 
    1361             : /*
    1362             :  * array_element_has_equality and friends are helper routines to check
    1363             :  * whether we should believe that array_eq and related functions will work
    1364             :  * on the given array type or composite type.
    1365             :  *
    1366             :  * The logic above may call these repeatedly on the same type entry, so we
    1367             :  * make use of the typentry->flags field to cache the results once known.
    1368             :  * Also, we assume that we'll probably want all these facts about the type
    1369             :  * if we want any, so we cache them all using only one lookup of the
    1370             :  * component datatype(s).
    1371             :  */
    1372             : 
    1373             : static bool
    1374        3254 : array_element_has_equality(TypeCacheEntry *typentry)
    1375             : {
    1376        3254 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1377        3234 :         cache_array_element_properties(typentry);
    1378        3254 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
    1379             : }
    1380             : 
    1381             : static bool
    1382        6534 : array_element_has_compare(TypeCacheEntry *typentry)
    1383             : {
    1384        6534 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1385          56 :         cache_array_element_properties(typentry);
    1386        6534 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
    1387             : }
    1388             : 
    1389             : static bool
    1390          28 : array_element_has_hashing(TypeCacheEntry *typentry)
    1391             : {
    1392          28 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1393           0 :         cache_array_element_properties(typentry);
    1394          28 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
    1395             : }
    1396             : 
    1397             : static bool
    1398           0 : array_element_has_extended_hashing(TypeCacheEntry *typentry)
    1399             : {
    1400           0 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1401           0 :         cache_array_element_properties(typentry);
    1402           0 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
    1403             : }
    1404             : 
    1405             : static void
    1406        3290 : cache_array_element_properties(TypeCacheEntry *typentry)
    1407             : {
    1408        3290 :     Oid         elem_type = get_base_element_type(typentry->type_id);
    1409             : 
    1410        3290 :     if (OidIsValid(elem_type))
    1411             :     {
    1412             :         TypeCacheEntry *elementry;
    1413             : 
    1414        2842 :         elementry = lookup_type_cache(elem_type,
    1415             :                                       TYPECACHE_EQ_OPR |
    1416             :                                       TYPECACHE_CMP_PROC |
    1417             :                                       TYPECACHE_HASH_PROC |
    1418             :                                       TYPECACHE_HASH_EXTENDED_PROC);
    1419        2842 :         if (OidIsValid(elementry->eq_opr))
    1420        2842 :             typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
    1421        2842 :         if (OidIsValid(elementry->cmp_proc))
    1422        2390 :             typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
    1423        2842 :         if (OidIsValid(elementry->hash_proc))
    1424        2830 :             typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
    1425        2842 :         if (OidIsValid(elementry->hash_extended_proc))
    1426        2830 :             typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
    1427             :     }
    1428        3290 :     typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
    1429        3290 : }
    1430             : 
    1431             : /*
    1432             :  * Likewise, some helper functions for composite types.
    1433             :  */
    1434             : 
    1435             : static bool
    1436         100 : record_fields_have_equality(TypeCacheEntry *typentry)
    1437             : {
    1438         100 :     if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
    1439          92 :         cache_record_field_properties(typentry);
    1440         100 :     return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
    1441             : }
    1442             : 
    1443             : static bool
    1444         202 : record_fields_have_compare(TypeCacheEntry *typentry)
    1445             : {
    1446         202 :     if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
    1447          34 :         cache_record_field_properties(typentry);
    1448         202 :     return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
    1449             : }
    1450             : 
    1451             : static void
    1452         126 : cache_record_field_properties(TypeCacheEntry *typentry)
    1453             : {
    1454             :     /*
    1455             :      * For type RECORD, we can't really tell what will work, since we don't
    1456             :      * have access here to the specific anonymous type.  Just assume that
    1457             :      * everything will (we may get a failure at runtime ...)
    1458             :      */
    1459         126 :     if (typentry->type_id == RECORDOID)
    1460          12 :         typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
    1461             :                             TCFLAGS_HAVE_FIELD_COMPARE);
    1462         114 :     else if (typentry->typtype == TYPTYPE_COMPOSITE)
    1463             :     {
    1464             :         TupleDesc   tupdesc;
    1465             :         int         newflags;
    1466             :         int         i;
    1467             : 
    1468             :         /* Fetch composite type's tupdesc if we don't have it already */
    1469         114 :         if (typentry->tupDesc == NULL)
    1470          42 :             load_typcache_tupdesc(typentry);
    1471         114 :         tupdesc = typentry->tupDesc;
    1472             : 
    1473             :         /* Must bump the refcount while we do additional catalog lookups */
    1474         114 :         IncrTupleDescRefCount(tupdesc);
    1475             : 
    1476             :         /* Have each property if all non-dropped fields have the property */
    1477         114 :         newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
    1478             :                     TCFLAGS_HAVE_FIELD_COMPARE);
    1479         366 :         for (i = 0; i < tupdesc->natts; i++)
    1480             :         {
    1481             :             TypeCacheEntry *fieldentry;
    1482         260 :             Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
    1483             : 
    1484         260 :             if (attr->attisdropped)
    1485           6 :                 continue;
    1486             : 
    1487         254 :             fieldentry = lookup_type_cache(attr->atttypid,
    1488             :                                            TYPECACHE_EQ_OPR |
    1489             :                                            TYPECACHE_CMP_PROC);
    1490         254 :             if (!OidIsValid(fieldentry->eq_opr))
    1491           8 :                 newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
    1492         254 :             if (!OidIsValid(fieldentry->cmp_proc))
    1493           8 :                 newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
    1494             : 
    1495             :             /* We can drop out of the loop once we disprove all bits */
    1496         254 :             if (newflags == 0)
    1497           8 :                 break;
    1498             :         }
    1499         114 :         typentry->flags |= newflags;
    1500             : 
    1501         114 :         DecrTupleDescRefCount(tupdesc);
    1502             :     }
    1503           0 :     else if (typentry->typtype == TYPTYPE_DOMAIN)
    1504             :     {
    1505             :         /* If it's domain over composite, copy base type's properties */
    1506             :         TypeCacheEntry *baseentry;
    1507             : 
    1508             :         /* load up basetype info if we didn't already */
    1509           0 :         if (typentry->domainBaseType == InvalidOid)
    1510             :         {
    1511           0 :             typentry->domainBaseTypmod = -1;
    1512           0 :             typentry->domainBaseType =
    1513           0 :                 getBaseTypeAndTypmod(typentry->type_id,
    1514             :                                      &typentry->domainBaseTypmod);
    1515             :         }
    1516           0 :         baseentry = lookup_type_cache(typentry->domainBaseType,
    1517             :                                       TYPECACHE_EQ_OPR |
    1518             :                                       TYPECACHE_CMP_PROC);
    1519           0 :         if (baseentry->typtype == TYPTYPE_COMPOSITE)
    1520             :         {
    1521           0 :             typentry->flags |= TCFLAGS_DOMAIN_BASE_IS_COMPOSITE;
    1522           0 :             typentry->flags |= baseentry->flags & (TCFLAGS_HAVE_FIELD_EQUALITY |
    1523             :                                                    TCFLAGS_HAVE_FIELD_COMPARE);
    1524             :         }
    1525             :     }
    1526         126 :     typentry->flags |= TCFLAGS_CHECKED_FIELD_PROPERTIES;
    1527         126 : }
    1528             : 
    1529             : /*
    1530             :  * Likewise, some helper functions for range types.
    1531             :  *
    1532             :  * We can borrow the flag bits for array element properties to use for range
    1533             :  * element properties, since those flag bits otherwise have no use in a
    1534             :  * range type's typcache entry.
    1535             :  */
    1536             : 
    1537             : static bool
    1538          12 : range_element_has_hashing(TypeCacheEntry *typentry)
    1539             : {
    1540          12 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1541          12 :         cache_range_element_properties(typentry);
    1542          12 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
    1543             : }
    1544             : 
    1545             : static bool
    1546           0 : range_element_has_extended_hashing(TypeCacheEntry *typentry)
    1547             : {
    1548           0 :     if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
    1549           0 :         cache_range_element_properties(typentry);
    1550           0 :     return (typentry->flags & TCFLAGS_HAVE_ELEM_EXTENDED_HASHING) != 0;
    1551             : }
    1552             : 
    1553             : static void
    1554          12 : cache_range_element_properties(TypeCacheEntry *typentry)
    1555             : {
    1556             :     /* load up subtype link if we didn't already */
    1557          12 :     if (typentry->rngelemtype == NULL &&
    1558           0 :         typentry->typtype == TYPTYPE_RANGE)
    1559           0 :         load_rangetype_info(typentry);
    1560             : 
    1561          12 :     if (typentry->rngelemtype != NULL)
    1562             :     {
    1563             :         TypeCacheEntry *elementry;
    1564             : 
    1565             :         /* might need to calculate subtype's hash function properties */
    1566          12 :         elementry = lookup_type_cache(typentry->rngelemtype->type_id,
    1567             :                                       TYPECACHE_HASH_PROC |
    1568             :                                       TYPECACHE_HASH_EXTENDED_PROC);
    1569          12 :         if (OidIsValid(elementry->hash_proc))
    1570           8 :             typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
    1571          12 :         if (OidIsValid(elementry->hash_extended_proc))
    1572           8 :             typentry->flags |= TCFLAGS_HAVE_ELEM_EXTENDED_HASHING;
    1573             :     }
    1574          12 :     typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
    1575          12 : }
    1576             : 
    1577             : /*
    1578             :  * Make sure that RecordCacheArray and RecordIdentifierArray are large enough
    1579             :  * to store 'typmod'.
    1580             :  */
    1581             : static void
    1582       13530 : ensure_record_cache_typmod_slot_exists(int32 typmod)
    1583             : {
    1584       13530 :     if (RecordCacheArray == NULL)
    1585             :     {
    1586        1660 :         RecordCacheArray = (TupleDesc *)
    1587        1660 :             MemoryContextAllocZero(CacheMemoryContext, 64 * sizeof(TupleDesc));
    1588        1660 :         RecordIdentifierArray = (uint64 *)
    1589        1660 :             MemoryContextAllocZero(CacheMemoryContext, 64 * sizeof(uint64));
    1590        1660 :         RecordCacheArrayLen = 64;
    1591             :     }
    1592             : 
    1593       13530 :     if (typmod >= RecordCacheArrayLen)
    1594             :     {
    1595           0 :         int32       newlen = RecordCacheArrayLen * 2;
    1596             : 
    1597           0 :         while (typmod >= newlen)
    1598           0 :             newlen *= 2;
    1599             : 
    1600           0 :         RecordCacheArray = (TupleDesc *) repalloc(RecordCacheArray,
    1601             :                                                   newlen * sizeof(TupleDesc));
    1602           0 :         memset(RecordCacheArray + RecordCacheArrayLen, 0,
    1603           0 :                (newlen - RecordCacheArrayLen) * sizeof(TupleDesc));
    1604           0 :         RecordIdentifierArray = (uint64 *) repalloc(RecordIdentifierArray,
    1605             :                                                     newlen * sizeof(uint64));
    1606           0 :         memset(RecordIdentifierArray + RecordCacheArrayLen, 0,
    1607           0 :                (newlen - RecordCacheArrayLen) * sizeof(uint64));
    1608           0 :         RecordCacheArrayLen = newlen;
    1609             :     }
    1610       13530 : }
    1611             : 
    1612             : /*
    1613             :  * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
    1614             :  *
    1615             :  * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
    1616             :  * hasn't had its refcount bumped.
    1617             :  */
    1618             : static TupleDesc
    1619       62366 : lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
    1620             : {
    1621       62366 :     if (type_id != RECORDOID)
    1622             :     {
    1623             :         /*
    1624             :          * It's a named composite type, so use the regular typcache.
    1625             :          */
    1626             :         TypeCacheEntry *typentry;
    1627             : 
    1628       44010 :         typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
    1629       44010 :         if (typentry->tupDesc == NULL && !noError)
    1630           0 :             ereport(ERROR,
    1631             :                     (errcode(ERRCODE_WRONG_OBJECT_TYPE),
    1632             :                      errmsg("type %s is not composite",
    1633             :                             format_type_be(type_id))));
    1634       44010 :         return typentry->tupDesc;
    1635             :     }
    1636             :     else
    1637             :     {
    1638             :         /*
    1639             :          * It's a transient record type, so look in our record-type table.
    1640             :          */
    1641       18356 :         if (typmod >= 0)
    1642             :         {
     1643             :             /* Is it already in our local cache? */
    1644       18342 :             if (typmod < RecordCacheArrayLen &&
    1645       18338 :                 RecordCacheArray[typmod] != NULL)
    1646       18322 :                 return RecordCacheArray[typmod];
    1647             : 
    1648             :             /* Are we attached to a shared record typmod registry? */
    1649          20 :             if (CurrentSession->shared_typmod_registry != NULL)
    1650             :             {
    1651             :                 SharedTypmodTableEntry *entry;
    1652             : 
    1653             :                 /* Try to find it in the shared typmod index. */
    1654          20 :                 entry = dshash_find(CurrentSession->shared_typmod_table,
    1655             :                                     &typmod, false);
    1656          20 :                 if (entry != NULL)
    1657             :                 {
    1658             :                     TupleDesc   tupdesc;
    1659             : 
    1660             :                     tupdesc = (TupleDesc)
    1661          20 :                         dsa_get_address(CurrentSession->area,
    1662             :                                         entry->shared_tupdesc);
    1663             :                     Assert(typmod == tupdesc->tdtypmod);
    1664             : 
    1665             :                     /* We may need to extend the local RecordCacheArray. */
    1666          20 :                     ensure_record_cache_typmod_slot_exists(typmod);
    1667             : 
    1668             :                     /*
    1669             :                      * Our local array can now point directly to the TupleDesc
    1670             :                      * in shared memory, which is non-reference-counted.
    1671             :                      */
    1672          20 :                     RecordCacheArray[typmod] = tupdesc;
    1673             :                     Assert(tupdesc->tdrefcount == -1);
    1674             : 
    1675             :                     /*
    1676             :                      * We don't share tupdesc identifiers across processes, so
    1677             :                      * assign one locally.
    1678             :                      */
    1679          20 :                     RecordIdentifierArray[typmod] = ++tupledesc_id_counter;
    1680             : 
    1681          20 :                     dshash_release_lock(CurrentSession->shared_typmod_table,
    1682             :                                         entry);
    1683             : 
    1684          20 :                     return RecordCacheArray[typmod];
    1685             :                 }
    1686             :             }
    1687             :         }
    1688             : 
    1689          14 :         if (!noError)
    1690           0 :             ereport(ERROR,
    1691             :                     (errcode(ERRCODE_WRONG_OBJECT_TYPE),
    1692             :                      errmsg("record type has not been registered")));
    1693          14 :         return NULL;
    1694             :     }
    1695             : }
    1696             : 
    1697             : /*
    1698             :  * lookup_rowtype_tupdesc
    1699             :  *
    1700             :  * Given a typeid/typmod that should describe a known composite type,
    1701             :  * return the tuple descriptor for the type.  Will ereport on failure.
    1702             :  * (Use ereport because this is reachable with user-specified OIDs,
    1703             :  * for example from record_in().)
    1704             :  *
    1705             :  * Note: on success, we increment the refcount of the returned TupleDesc,
    1706             :  * and log the reference in CurrentResourceOwner.  Caller should call
    1707             :  * ReleaseTupleDesc or DecrTupleDescRefCount when done using the tupdesc.
    1708             :  */
    1709             : TupleDesc
    1710       41920 : lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
    1711             : {
    1712             :     TupleDesc   tupDesc;
    1713             : 
    1714       41920 :     tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
    1715       41920 :     PinTupleDesc(tupDesc);
    1716       41920 :     return tupDesc;
    1717             : }
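
/*
 * Editor's note: a minimal usage sketch (not part of typcache.c) pairing the
 * refcount bump above with ReleaseTupleDesc, as the header comment asks.
 * count_rowtype_columns() is hypothetical; assumes utils/typcache.h.
 */
#ifdef TYPCACHE_EDITOR_SKETCH
static int
count_rowtype_columns(Oid rowtype_id, int32 rowtype_mod)
{
    TupleDesc   tupdesc = lookup_rowtype_tupdesc(rowtype_id, rowtype_mod);
    int         natts = tupdesc->natts;

    /* Undo the refcount bump (and the resource-owner entry) taken above */
    ReleaseTupleDesc(tupdesc);
    return natts;
}
#endif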
    1718             : 
    1719             : /*
    1720             :  * lookup_rowtype_tupdesc_noerror
    1721             :  *
    1722             :  * As above, but if the type is not a known composite type and noError
    1723             :  * is true, returns NULL instead of ereport'ing.  (Note that if a bogus
    1724             :  * type_id is passed, you'll get an ereport anyway.)
    1725             :  */
    1726             : TupleDesc
    1727           8 : lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
    1728             : {
    1729             :     TupleDesc   tupDesc;
    1730             : 
    1731           8 :     tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
    1732           8 :     if (tupDesc != NULL)
    1733           8 :         PinTupleDesc(tupDesc);
    1734           8 :     return tupDesc;
    1735             : }
    1736             : 
    1737             : /*
    1738             :  * lookup_rowtype_tupdesc_copy
    1739             :  *
    1740             :  * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
    1741             :  * copied into the CurrentMemoryContext and is not reference-counted.
    1742             :  */
    1743             : TupleDesc
    1744       20422 : lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
    1745             : {
    1746             :     TupleDesc   tmp;
    1747             : 
    1748       20422 :     tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
    1749       20422 :     return CreateTupleDescCopyConstr(tmp);
    1750             : }
    1751             : 
    1752             : /*
    1753             :  * lookup_rowtype_tupdesc_domain
    1754             :  *
    1755             :  * Same as lookup_rowtype_tupdesc_noerror(), except that the type can also be
    1756             :  * a domain over a named composite type; so this is effectively equivalent to
    1757             :  * lookup_rowtype_tupdesc_noerror(getBaseType(type_id), typmod, noError)
    1758             :  * except for being a tad faster.
    1759             :  *
    1760             :  * Note: the reason we don't fold the look-through-domain behavior into plain
    1761             :  * lookup_rowtype_tupdesc() is that we want callers to know they might be
    1762             :  * dealing with a domain.  Otherwise they might construct a tuple that should
    1763             :  * be of the domain type, but not apply domain constraints.
    1764             :  */
    1765             : TupleDesc
    1766        1154 : lookup_rowtype_tupdesc_domain(Oid type_id, int32 typmod, bool noError)
    1767             : {
    1768             :     TupleDesc   tupDesc;
    1769             : 
    1770        1154 :     if (type_id != RECORDOID)
    1771             :     {
    1772             :         /*
    1773             :          * Check for domain or named composite type.  We might as well load
    1774             :          * whichever data is needed.
    1775             :          */
    1776             :         TypeCacheEntry *typentry;
    1777             : 
    1778        1138 :         typentry = lookup_type_cache(type_id,
    1779             :                                      TYPECACHE_TUPDESC |
    1780             :                                      TYPECACHE_DOMAIN_BASE_INFO);
    1781        1138 :         if (typentry->typtype == TYPTYPE_DOMAIN)
    1782           8 :             return lookup_rowtype_tupdesc_noerror(typentry->domainBaseType,
    1783             :                                                   typentry->domainBaseTypmod,
    1784             :                                                   noError);
    1785        1130 :         if (typentry->tupDesc == NULL && !noError)
    1786           0 :             ereport(ERROR,
    1787             :                     (errcode(ERRCODE_WRONG_OBJECT_TYPE),
    1788             :                      errmsg("type %s is not composite",
    1789             :                             format_type_be(type_id))));
    1790        1130 :         tupDesc = typentry->tupDesc;
    1791             :     }
    1792             :     else
    1793          16 :         tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
    1794        1146 :     if (tupDesc != NULL)
    1795        1132 :         PinTupleDesc(tupDesc);
    1796        1146 :     return tupDesc;
    1797             : }
    1798             : 
    1799             : /*
    1800             :  * Hash function for the hash table of RecordCacheEntry.
    1801             :  */
    1802             : static uint32
    1803      110366 : record_type_typmod_hash(const void *data, size_t size)
    1804             : {
    1805      110366 :     RecordCacheEntry *entry = (RecordCacheEntry *) data;
    1806             : 
    1807      110366 :     return hashTupleDesc(entry->tupdesc);
    1808             : }
    1809             : 
    1810             : /*
    1811             :  * Match function for the hash table of RecordCacheEntry.
    1812             :  */
    1813             : static int
    1814      101004 : record_type_typmod_compare(const void *a, const void *b, size_t size)
    1815             : {
    1816      101004 :     RecordCacheEntry *left = (RecordCacheEntry *) a;
    1817      101004 :     RecordCacheEntry *right = (RecordCacheEntry *) b;
    1818             : 
    1819      101004 :     return equalTupleDescs(left->tupdesc, right->tupdesc) ? 0 : 1;
    1820             : }
    1821             : 
    1822             : /*
    1823             :  * assign_record_type_typmod
    1824             :  *
    1825             :  * Given a tuple descriptor for a RECORD type, find or create a cache entry
    1826             :  * for the type, and set the tupdesc's tdtypmod field to a value that will
    1827             :  * identify this cache entry to lookup_rowtype_tupdesc.
    1828             :  */
    1829             : void
    1830      110366 : assign_record_type_typmod(TupleDesc tupDesc)
    1831             : {
    1832             :     RecordCacheEntry *recentry;
    1833             :     TupleDesc   entDesc;
    1834             :     bool        found;
    1835             :     MemoryContext oldcxt;
    1836             : 
    1837             :     Assert(tupDesc->tdtypeid == RECORDOID);
    1838             : 
    1839      110366 :     if (RecordCacheHash == NULL)
    1840             :     {
    1841             :         /* First time through: initialize the hash table */
    1842             :         HASHCTL     ctl;
    1843             : 
    1844       23240 :         MemSet(&ctl, 0, sizeof(ctl));
    1845        1660 :         ctl.keysize = sizeof(TupleDesc);    /* just the pointer */
    1846        1660 :         ctl.entrysize = sizeof(RecordCacheEntry);
    1847        1660 :         ctl.hash = record_type_typmod_hash;
    1848        1660 :         ctl.match = record_type_typmod_compare;
    1849        1660 :         RecordCacheHash = hash_create("Record information cache", 64,
    1850             :                                       &ctl,
    1851             :                                       HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);
    1852             : 
    1853             :         /* Also make sure CacheMemoryContext exists */
    1854        1660 :         if (!CacheMemoryContext)
    1855           0 :             CreateCacheMemoryContext();
    1856             :     }
    1857             : 
    1858             :     /* Find or create a hashtable entry for this tuple descriptor */
    1859      110366 :     recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
    1860             :                                                 (void *) &tupDesc,
    1861             :                                                 HASH_ENTER, &found);
    1862      110366 :     if (found && recentry->tupdesc != NULL)
    1863             :     {
    1864       96856 :         tupDesc->tdtypmod = recentry->tupdesc->tdtypmod;
    1865       96856 :         return;
    1866             :     }
    1867             : 
    1868             :     /* Not present, so need to manufacture an entry */
    1869       13510 :     recentry->tupdesc = NULL;
    1870       13510 :     oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
    1871             : 
    1872             :     /* Look in the SharedRecordTypmodRegistry, if attached */
    1873       13510 :     entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
    1874       13510 :     if (entDesc == NULL)
    1875             :     {
    1876             :         /* Reference-counted local cache only. */
    1877       13466 :         entDesc = CreateTupleDescCopy(tupDesc);
    1878       13466 :         entDesc->tdrefcount = 1;
    1879       13466 :         entDesc->tdtypmod = NextRecordTypmod++;
    1880             :     }
    1881       13510 :     ensure_record_cache_typmod_slot_exists(entDesc->tdtypmod);
    1882       13510 :     RecordCacheArray[entDesc->tdtypmod] = entDesc;
    1883       13510 :     recentry->tupdesc = entDesc;
    1884             : 
    1885             :     /* Assign a unique tupdesc identifier, too. */
    1886       13510 :     RecordIdentifierArray[entDesc->tdtypmod] = ++tupledesc_id_counter;
    1887             : 
    1888             :     /* Update the caller's tuple descriptor. */
    1889       13510 :     tupDesc->tdtypmod = entDesc->tdtypmod;
    1890             : 
    1891       13510 :     MemoryContextSwitchTo(oldcxt);
    1892             : }
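
/*
 * Editor's note: a minimal sketch (not part of typcache.c) of registering an
 * anonymous record layout so that composite Datums built from it can later be
 * resolved via lookup_rowtype_tupdesc(RECORDOID, tdtypmod).  The two-column
 * layout is invented for illustration; assumes access/tupdesc.h,
 * catalog/pg_type.h, and utils/typcache.h.  (The executor's BlessTupleDesc()
 * performs essentially this registration.)
 */
#ifdef TYPCACHE_EDITOR_SKETCH
static TupleDesc
make_registered_record_tupdesc(void)
{
    TupleDesc   tupdesc = CreateTemplateTupleDesc(2);

    TupleDescInitEntry(tupdesc, (AttrNumber) 1, "a", INT4OID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 2, "b", TEXTOID, -1, 0);

    /* Assigns tupdesc->tdtypmod and caches the descriptor for this backend */
    assign_record_type_typmod(tupdesc);

    return tupdesc;
}
#endif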
    1893             : 
    1894             : /*
    1895             :  * assign_record_type_identifier
    1896             :  *
    1897             :  * Get an identifier, which will be unique over the lifespan of this backend
    1898             :  * process, for the current tuple descriptor of the specified composite type.
    1899             :  * For named composite types, the value is guaranteed to change if the type's
    1900             :  * definition does.  For registered RECORD types, the value will not change
    1901             :  * once assigned, since the registered type won't either.  If an anonymous
    1902             :  * RECORD type is specified, we return a new identifier on each call.
    1903             :  */
    1904             : uint64
    1905        2356 : assign_record_type_identifier(Oid type_id, int32 typmod)
    1906             : {
    1907        2356 :     if (type_id != RECORDOID)
    1908             :     {
    1909             :         /*
    1910             :          * It's a named composite type, so use the regular typcache.
    1911             :          */
    1912             :         TypeCacheEntry *typentry;
    1913             : 
    1914           0 :         typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
    1915           0 :         if (typentry->tupDesc == NULL)
    1916           0 :             ereport(ERROR,
    1917             :                     (errcode(ERRCODE_WRONG_OBJECT_TYPE),
    1918             :                      errmsg("type %s is not composite",
    1919             :                             format_type_be(type_id))));
    1920             :         Assert(typentry->tupDesc_identifier != 0);
    1921           0 :         return typentry->tupDesc_identifier;
    1922             :     }
    1923             :     else
    1924             :     {
    1925             :         /*
    1926             :          * It's a transient record type, so look in our record-type table.
    1927             :          */
    1928        2356 :         if (typmod >= 0 && typmod < RecordCacheArrayLen &&
    1929          50 :             RecordCacheArray[typmod] != NULL)
    1930             :         {
    1931             :             Assert(RecordIdentifierArray[typmod] != 0);
    1932          50 :             return RecordIdentifierArray[typmod];
    1933             :         }
    1934             : 
    1935             :         /* For anonymous or unrecognized record type, generate a new ID */
    1936        2306 :         return ++tupledesc_id_counter;
    1937             :     }
    1938             : }
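
/*
 * A minimal sketch (hypothetical struct and function names) of how a caller
 * can use assign_record_type_identifier() to notice that a composite type's
 * tuple descriptor may have changed and that any derived state should be
 * rebuilt.  Note that for anonymous RECORD typmods the identifier changes on
 * every call, so such state always reads as stale, per the contract above.
 */
typedef struct CachedRowInfo
{
    Oid         typid;
    int32       typmod;
    uint64      tupdesc_id;     /* last value from assign_record_type_identifier */
    /* ... plus whatever state the caller derived from the tuple descriptor ... */
} CachedRowInfo;

static bool
cached_row_info_is_stale(CachedRowInfo *info)
{
    /* A different identifier means the descriptor has (possibly) changed. */
    return assign_record_type_identifier(info->typid, info->typmod) !=
        info->tupdesc_id;
}
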
    1939             : 
    1940             : /*
    1941             :  * Return the amount of shmem required to hold a SharedRecordTypmodRegistry.
    1942             :  * This exists only to avoid exposing private innards of
    1943             :  * SharedRecordTypmodRegistry in a header.
    1944             :  */
    1945             : size_t
    1946          60 : SharedRecordTypmodRegistryEstimate(void)
    1947             : {
    1948          60 :     return sizeof(SharedRecordTypmodRegistry);
    1949             : }
    1950             : 
    1951             : /*
    1952             :  * Initialize 'registry' in a pre-existing shared memory region, which must be
    1953             :  * maximally aligned and have space for SharedRecordTypmodRegistryEstimate()
    1954             :  * bytes.
    1955             :  *
    1956             :  * 'area' will be used to allocate shared memory space as required for the
    1957             :  * typmod registration.  The current process, expected to be a leader process
    1958             :  * in a parallel query, will be attached automatically and its current record
    1959             :  * types will be loaded into *registry.  While attached, all calls to
    1960             :  * assign_record_type_typmod will use the shared registry.  Worker backends
    1961             :  * will need to attach explicitly.
    1962             :  *
    1963             :  * Note that this function takes 'area' and 'segment' as arguments rather than
    1964             :  * accessing them via CurrentSession, because they aren't installed there
    1965             :  * until after this function runs.
    1966             :  */
    1967             : void
    1968          60 : SharedRecordTypmodRegistryInit(SharedRecordTypmodRegistry *registry,
    1969             :                                dsm_segment *segment,
    1970             :                                dsa_area *area)
    1971             : {
    1972             :     MemoryContext old_context;
    1973             :     dshash_table *record_table;
    1974             :     dshash_table *typmod_table;
    1975             :     int32       typmod;
    1976             : 
    1977             :     Assert(!IsParallelWorker());
    1978             : 
    1979             :     /* We can't already be attached to a shared registry. */
    1980             :     Assert(CurrentSession->shared_typmod_registry == NULL);
    1981             :     Assert(CurrentSession->shared_record_table == NULL);
    1982             :     Assert(CurrentSession->shared_typmod_table == NULL);
    1983             : 
    1984          60 :     old_context = MemoryContextSwitchTo(TopMemoryContext);
    1985             : 
    1986             :     /* Create the hash table of tuple descriptors indexed by themselves. */
    1987          60 :     record_table = dshash_create(area, &srtr_record_table_params, area);
    1988             : 
    1989             :     /* Create the hash table of tuple descriptors indexed by typmod. */
    1990          60 :     typmod_table = dshash_create(area, &srtr_typmod_table_params, NULL);
    1991             : 
    1992          60 :     MemoryContextSwitchTo(old_context);
    1993             : 
    1994             :     /* Initialize the SharedRecordTypmodRegistry. */
    1995          60 :     registry->record_table_handle = dshash_get_hash_table_handle(record_table);
    1996          60 :     registry->typmod_table_handle = dshash_get_hash_table_handle(typmod_table);
    1997          60 :     pg_atomic_init_u32(&registry->next_typmod, NextRecordTypmod);
    1998             : 
    1999             :     /*
    2000             :      * Copy all entries from this backend's private registry into the shared
    2001             :      * registry.
    2002             :      */
    2003          64 :     for (typmod = 0; typmod < NextRecordTypmod; ++typmod)
    2004             :     {
    2005             :         SharedTypmodTableEntry *typmod_table_entry;
    2006             :         SharedRecordTableEntry *record_table_entry;
    2007             :         SharedRecordTableKey record_table_key;
    2008             :         dsa_pointer shared_dp;
    2009             :         TupleDesc   tupdesc;
    2010             :         bool        found;
    2011             : 
    2012           4 :         tupdesc = RecordCacheArray[typmod];
    2013           4 :         if (tupdesc == NULL)
    2014           0 :             continue;
    2015             : 
    2016             :         /* Copy the TupleDesc into shared memory. */
    2017           4 :         shared_dp = share_tupledesc(area, tupdesc, typmod);
    2018             : 
    2019             :         /* Insert into the typmod table. */
    2020           4 :         typmod_table_entry = dshash_find_or_insert(typmod_table,
    2021           4 :                                                    &tupdesc->tdtypmod,
    2022             :                                                    &found);
    2023           4 :         if (found)
    2024           0 :             elog(ERROR, "cannot create duplicate shared record typmod");
    2025           4 :         typmod_table_entry->typmod = tupdesc->tdtypmod;
    2026           4 :         typmod_table_entry->shared_tupdesc = shared_dp;
    2027           4 :         dshash_release_lock(typmod_table, typmod_table_entry);
    2028             : 
    2029             :         /* Insert into the record table. */
    2030           4 :         record_table_key.shared = false;
    2031           4 :         record_table_key.u.local_tupdesc = tupdesc;
    2032           4 :         record_table_entry = dshash_find_or_insert(record_table,
    2033             :                                                    &record_table_key,
    2034             :                                                    &found);
    2035           4 :         if (!found)
    2036             :         {
    2037           4 :             record_table_entry->key.shared = true;
    2038           4 :             record_table_entry->key.u.shared_tupdesc = shared_dp;
    2039             :         }
    2040           4 :         dshash_release_lock(record_table, record_table_entry);
    2041             :     }
    2042             : 
    2043             :     /*
    2044             :      * Set up the global state that will tell assign_record_type_typmod and
    2045             :      * lookup_rowtype_tupdesc_internal about the shared registry.
    2046             :      */
    2047          60 :     CurrentSession->shared_record_table = record_table;
    2048          60 :     CurrentSession->shared_typmod_table = typmod_table;
    2049          60 :     CurrentSession->shared_typmod_registry = registry;
    2050             : 
    2051             :     /*
    2052             :      * We install a detach hook in the leader, but only to handle cleanup on
    2053             :      * failure during GetSessionDsmHandle().  Once GetSessionDsmHandle() pins
    2054             :      * the memory, the leader process will use a shared registry until it
    2055             :      * exits.
    2056             :      */
    2057          60 :     on_dsm_detach(segment, shared_record_typmod_registry_detach, (Datum) 0);
    2058          60 : }
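
/*
 * A condensed sketch of the leader-side call sequence, assuming the DSM
 * segment, DSA area, and a maximally aligned chunk of at least
 * SharedRecordTypmodRegistryEstimate() bytes ('registry_space') have already
 * been carved out by session-DSM setup code not shown here.  Function and
 * parameter names are illustrative.
 */
static void
leader_setup_shared_typmods(dsm_segment *segment, dsa_area *area,
                            void *registry_space)
{
    SharedRecordTypmodRegistry *registry;

    registry = (SharedRecordTypmodRegistry *) registry_space;
    SharedRecordTypmodRegistryInit(registry, segment, area);

    /*
     * From this point on, assign_record_type_typmod() in this backend
     * publishes new record typmods through the shared registry.
     */
}
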
    2059             : 
    2060             : /*
    2061             :  * Attach to 'registry', which must have been initialized already by another
    2062             :  * backend.  Future calls to assign_record_type_typmod and
    2063             :  * lookup_rowtype_tupdesc_internal will use the shared registry until the
    2064             :  * current session is detached.
    2065             :  */
    2066             : void
    2067        1570 : SharedRecordTypmodRegistryAttach(SharedRecordTypmodRegistry *registry)
    2068             : {
    2069             :     MemoryContext old_context;
    2070             :     dshash_table *record_table;
    2071             :     dshash_table *typmod_table;
    2072             : 
    2073             :     Assert(IsParallelWorker());
    2074             : 
    2075             :     /* We can't already be attached to a shared registry. */
    2076             :     Assert(CurrentSession != NULL);
    2077             :     Assert(CurrentSession->segment != NULL);
    2078             :     Assert(CurrentSession->area != NULL);
    2079             :     Assert(CurrentSession->shared_typmod_registry == NULL);
    2080             :     Assert(CurrentSession->shared_record_table == NULL);
    2081             :     Assert(CurrentSession->shared_typmod_table == NULL);
    2082             : 
    2083             :     /*
    2084             :      * We can't already have typmods in our local cache, because they'd clash
    2085             :      * with those imported by SharedRecordTypmodRegistryInit.  This should be
    2086             :      * a freshly started parallel worker.  If we ever support worker
    2087             :      * recycling, a worker would need to zap its local cache in between
    2088             :      * servicing different queries, in order to be able to call this and
    2089             :      * synchronize typmods with a new leader; but that's problematic because
    2090             :      * we can't be very sure that record-typmod-related state hasn't escaped
    2091             :      * to anywhere else in the process.
    2092             :      */
    2093             :     Assert(NextRecordTypmod == 0);
    2094             : 
    2095        1570 :     old_context = MemoryContextSwitchTo(TopMemoryContext);
    2096             : 
    2097             :     /* Attach to the two hash tables. */
    2098        1570 :     record_table = dshash_attach(CurrentSession->area,
    2099             :                                  &srtr_record_table_params,
    2100             :                                  registry->record_table_handle,
    2101        1570 :                                  CurrentSession->area);
    2102        1570 :     typmod_table = dshash_attach(CurrentSession->area,
    2103             :                                  &srtr_typmod_table_params,
    2104             :                                  registry->typmod_table_handle,
    2105             :                                  NULL);
    2106             : 
    2107        1570 :     MemoryContextSwitchTo(old_context);
    2108             : 
    2109             :     /*
    2110             :      * Set up detach hook to run at worker exit.  Currently this is the same
    2111             :      * as the leader's detach hook, but in future they might need to be
    2112             :      * different.
    2113             :      */
    2114        1570 :     on_dsm_detach(CurrentSession->segment,
    2115             :                   shared_record_typmod_registry_detach,
    2116             :                   PointerGetDatum(registry));
    2117             : 
    2118             :     /*
    2119             :      * Set up the session state that will tell assign_record_type_typmod and
    2120             :      * lookup_rowtype_tupdesc_internal about the shared registry.
    2121             :      */
    2122        1570 :     CurrentSession->shared_typmod_registry = registry;
    2123        1570 :     CurrentSession->shared_record_table = record_table;
    2124        1570 :     CurrentSession->shared_typmod_table = typmod_table;
    2125        1570 : }
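
/*
 * The matching worker-side step, again only as an illustrative sketch: the
 * worker locates the same registry within the session DSM segment (address
 * recovery is elided here) and attaches before executing its plan.
 */
static void
worker_attach_shared_typmods(void *registry_space)
{
    SharedRecordTypmodRegistry *registry;

    registry = (SharedRecordTypmodRegistry *) registry_space;
    SharedRecordTypmodRegistryAttach(registry);

    /* Record typmod lookups in this worker now consult the shared tables. */
}
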
    2126             : 
    2127             : /*
    2128             :  * TypeCacheRelCallback
    2129             :  *      Relcache inval callback function
    2130             :  *
    2131             :  * Delete the cached tuple descriptor (if any) for the given rel's composite
    2132             :  * type, or for all composite types if relid == InvalidOid.  Also reset
    2133             :  * whatever info we have cached about the composite type's comparability.
    2134             :  *
    2135             :  * This is called when a relcache invalidation event occurs for the given
    2136             :  * relid.  We must scan the whole typcache hash since we don't know the
    2137             :  * type OID corresponding to the relid.  We could do a direct search if this
    2138             :  * were a syscache-flush callback on pg_type, but then we would need all
    2139             :  * ALTER-TABLE-like commands that could modify a rowtype to issue syscache
    2140             :  * invals against the rel's pg_type OID.  The extra SI signaling could very
    2141             :  * well cost more than we'd save, since in most usages there are not very
    2142             :  * many entries in a backend's typcache.  The risk of bugs-of-omission seems
    2143             :  * high, too.
    2144             :  *
    2145             :  * Another possibility, with only localized impact, is to maintain a second
    2146             :  * hashtable that indexes composite-type typcache entries by their typrelid.
    2147             :  * But it's still not clear it's worth the trouble.
    2148             :  */
    2149             : static void
    2150     1026158 : TypeCacheRelCallback(Datum arg, Oid relid)
    2151             : {
    2152             :     HASH_SEQ_STATUS status;
    2153             :     TypeCacheEntry *typentry;
    2154             : 
    2155             :     /* TypeCacheHash must exist, else this callback wouldn't be registered */
    2156     1026158 :     hash_seq_init(&status, TypeCacheHash);
    2157    12427526 :     while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
    2158             :     {
    2159    11401368 :         if (typentry->typtype == TYPTYPE_COMPOSITE)
    2160             :         {
    2161             :             /* Skip if no match, unless we're zapping all composite types */
    2162     2221458 :             if (relid != typentry->typrelid && relid != InvalidOid)
    2163     2212448 :                 continue;
    2164             : 
    2165             :             /* Delete tupdesc if we have it */
    2166        9010 :             if (typentry->tupDesc != NULL)
    2167             :             {
    2168             :                 /*
    2169             :                  * Release our refcount, and free the tupdesc if none remain.
    2170             :                  * (Can't use DecrTupleDescRefCount because this reference is
    2171             :                  * not logged in current resource owner.)
    2172             :                  */
    2173             :                 Assert(typentry->tupDesc->tdrefcount > 0);
    2174        2170 :                 if (--typentry->tupDesc->tdrefcount == 0)
    2175        1312 :                     FreeTupleDesc(typentry->tupDesc);
    2176        2170 :                 typentry->tupDesc = NULL;
    2177             : 
    2178             :                 /*
    2179             :                  * Also clear tupDesc_identifier, so that anything watching
    2180             :                  * that will realize that the tupdesc has possibly changed.
    2181             :                  * (Alternatively, we could specify that to detect possible
    2182             :                  * tupdesc change, one must check for tupDesc != NULL as well
    2183             :                  * as tupDesc_identifier being the same as what was previously
    2184             :                  * seen.  That seems error-prone.)
    2185             :                  */
    2186        2170 :                 typentry->tupDesc_identifier = 0;
    2187             :             }
    2188             : 
    2189             :             /* Reset equality/comparison/hashing validity information */
    2190        9010 :             typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
    2191             :         }
    2192     9179910 :         else if (typentry->typtype == TYPTYPE_DOMAIN)
    2193             :         {
    2194             :             /*
    2195             :              * If it's domain over composite, reset flags.  (We don't bother
    2196             :              * trying to determine whether the specific base type needs a
    2197             :              * reset.)  Note that if we haven't determined whether the base
    2198             :              * type is composite, we don't need to reset anything.
    2199             :              */
    2200     1169336 :             if (typentry->flags & TCFLAGS_DOMAIN_BASE_IS_COMPOSITE)
    2201           0 :                 typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
    2202             :         }
    2203             :     }
    2204     1026158 : }
    2205             : 
    2206             : /*
    2207             :  * TypeCacheTypCallback
    2208             :  *      Syscache inval callback function
    2209             :  *
    2210             :  * This is called when a syscache invalidation event occurs for any
    2211             :  * pg_type row.  If we have information cached about that type, mark
    2212             :  * it as needing to be reloaded.
    2213             :  */
    2214             : static void
    2215      387976 : TypeCacheTypCallback(Datum arg, int cacheid, uint32 hashvalue)
    2216             : {
    2217             :     HASH_SEQ_STATUS status;
    2218             :     TypeCacheEntry *typentry;
    2219             : 
    2220             :     /* TypeCacheHash must exist, else this callback wouldn't be registered */
    2221      387976 :     hash_seq_init(&status, TypeCacheHash);
    2222     3862694 :     while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
    2223             :     {
    2224             :         /* Is this the targeted type row (or is it a total cache flush)? */
    2225     3474718 :         if (hashvalue == 0 || typentry->type_id_hash == hashvalue)
    2226             :         {
    2227             :             /*
    2228             :              * Mark the data obtained directly from pg_type as invalid.  Also,
    2229             :              * if it's a domain, typnotnull might've changed, so we'll need to
    2230             :              * recalculate its constraints.
    2231             :              */
    2232        3168 :             typentry->flags &= ~(TCFLAGS_HAVE_PG_TYPE_DATA |
    2233             :                                  TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS);
    2234             :         }
    2235             :     }
    2236      387976 : }
    2237             : 
    2238             : /*
    2239             :  * TypeCacheOpcCallback
    2240             :  *      Syscache inval callback function
    2241             :  *
    2242             :  * This is called when a syscache invalidation event occurs for any pg_opclass
    2243             :  * row.  In principle we could probably just invalidate data dependent on the
    2244             :  * particular opclass, but since updates on pg_opclass are rare in production,
    2245             :  * it doesn't seem worth a lot of complication: we just mark all cached data
    2246             :  * invalid.
    2247             :  *
    2248             :  * Note that we don't bother watching for updates on pg_amop or pg_amproc.
    2249             :  * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
    2250             :  * is not allowed to be used to add/drop the primary operators and functions
    2251             :  * of an opclass, only cross-type members of a family; and the latter sorts
    2252             :  * of members are not going to get cached here.
    2253             :  */
    2254             : static void
    2255         788 : TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
    2256             : {
    2257             :     HASH_SEQ_STATUS status;
    2258             :     TypeCacheEntry *typentry;
    2259             : 
    2260             :     /* TypeCacheHash must exist, else this callback wouldn't be registered */
    2261         788 :     hash_seq_init(&status, TypeCacheHash);
    2262        4240 :     while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
    2263             :     {
    2264             :         /* Reset equality/comparison/hashing validity information */
    2265        3452 :         typentry->flags &= ~TCFLAGS_OPERATOR_FLAGS;
    2266             :     }
    2267         788 : }
    2268             : 
    2269             : /*
    2270             :  * TypeCacheConstrCallback
    2271             :  *      Syscache inval callback function
    2272             :  *
    2273             :  * This is called when a syscache invalidation event occurs for any
    2274             :  * pg_constraint row.  We flush information about domain constraints
    2275             :  * when this happens.
    2276             :  *
    2277             :  * It's slightly annoying that we can't tell whether the inval event was for
    2278             :  * a domain constraint record or not; there's usually more update traffic
    2279             :  * for table constraints than domain constraints, so we'll do a lot of
    2280             :  * useless flushes.  Still, this is better than the old no-caching-at-all
    2281             :  * approach to domain constraints.
    2282             :  */
    2283             : static void
    2284       40830 : TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
    2285             : {
    2286             :     TypeCacheEntry *typentry;
    2287             : 
    2288             :     /*
    2289             :      * Because this is called very frequently, and typically very few of the
    2290             :      * typcache entries are for domains, we don't use hash_seq_search here.
    2291             :      * Instead we thread all the domain-type entries together so that we can
    2292             :      * visit them cheaply.
    2293             :      */
    2294       81204 :     for (typentry = firstDomainTypeEntry;
    2295             :          typentry != NULL;
    2296       40374 :          typentry = typentry->nextDomain)
    2297             :     {
    2298             :         /* Reset domain constraint validity information */
    2299       40374 :         typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
    2300             :     }
    2301       40830 : }
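
/*
 * The four invalidation callbacks above are registered once, on first use of
 * the type cache, earlier in this file (outside this excerpt).  As a rough
 * sketch, the registration follows the standard inval.c pattern; the wrapper
 * function name here is hypothetical.
 */
static void
register_typcache_inval_callbacks(void)
{
    /* relcache inval: composite types (and domains over composites) */
    CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);

    /* syscache invals: pg_type, pg_opclass, and pg_constraint rows */
    CacheRegisterSyscacheCallback(TYPEOID, TypeCacheTypCallback, (Datum) 0);
    CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
    CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);
}
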
    2302             : 
    2303             : 
    2304             : /*
    2305             :  * Check if given OID is part of the subset that's sortable by comparisons
    2306             :  */
    2307             : static inline bool
    2308      300080 : enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
    2309             : {
    2310             :     Oid         offset;
    2311             : 
    2312      300080 :     if (arg < enumdata->bitmap_base)
    2313           0 :         return false;
    2314      300080 :     offset = arg - enumdata->bitmap_base;
    2315      300080 :     if (offset > (Oid) INT_MAX)
    2316           0 :         return false;
    2317      300080 :     return bms_is_member((int) offset, enumdata->sorted_values);
    2318             : }
    2319             : 
    2320             : 
    2321             : /*
    2322             :  * compare_values_of_enum
    2323             :  *      Compare two members of an enum type.
    2324             :  *      Return <0, 0, or >0 according as arg1 <, =, or > arg2.
    2325             :  *
    2326             :  * Note: currently, the enumData cache is refreshed only if we are asked
    2327             :  * to compare an enum value that is not already in the cache.  This is okay
    2328             :  * because there is no support for re-ordering existing values, so comparisons
    2329             :  * of previously cached values will return the right answer even if other
    2330             :  * values have been added since we last loaded the cache.
    2331             :  *
    2332             :  * Note: the enum logic has a special-case rule about even-numbered versus
    2333             :  * odd-numbered OIDs, but we take no account of that rule here; this
    2334             :  * routine shouldn't even get called when that rule applies.
    2335             :  */
    2336             : int
    2337      150052 : compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
    2338             : {
    2339             :     TypeCacheEnumData *enumdata;
    2340             :     EnumItem   *item1;
    2341             :     EnumItem   *item2;
    2342             : 
    2343             :     /*
    2344             :      * Equal OIDs are certainly equal --- this case was probably handled by
    2345             :      * our caller, but we may as well check.
    2346             :      */
    2347      150052 :     if (arg1 == arg2)
    2348           0 :         return 0;
    2349             : 
    2350             :     /* Load up the cache if first time through */
    2351      150052 :     if (tcache->enumData == NULL)
    2352           6 :         load_enum_cache_data(tcache);
    2353      150052 :     enumdata = tcache->enumData;
    2354             : 
    2355             :     /*
    2356             :      * If both OIDs are known-sorted, we can just compare them directly.
    2357             :      */
    2358      300080 :     if (enum_known_sorted(enumdata, arg1) &&
    2359      150028 :         enum_known_sorted(enumdata, arg2))
    2360             :     {
    2361           0 :         if (arg1 < arg2)
    2362           0 :             return -1;
    2363             :         else
    2364           0 :             return 1;
    2365             :     }
    2366             : 
    2367             :     /*
    2368             :      * Slow path: we have to identify their actual sort-order positions.
    2369             :      */
    2370      150052 :     item1 = find_enumitem(enumdata, arg1);
    2371      150052 :     item2 = find_enumitem(enumdata, arg2);
    2372             : 
    2373      150052 :     if (item1 == NULL || item2 == NULL)
    2374             :     {
    2375             :         /*
    2376             :          * We couldn't find one or both values.  That means the enum has
    2377             :          * changed under us, so re-initialize the cache and try again. We
    2378             :          * don't bother retrying the known-sorted case in this path.
    2379             :          */
    2380           0 :         load_enum_cache_data(tcache);
    2381           0 :         enumdata = tcache->enumData;
    2382             : 
    2383           0 :         item1 = find_enumitem(enumdata, arg1);
    2384           0 :         item2 = find_enumitem(enumdata, arg2);
    2385             : 
    2386             :         /*
    2387             :          * If we still can't find the values, complain: we must have corrupt
    2388             :          * data.
    2389             :          */
    2390           0 :         if (item1 == NULL)
    2391           0 :             elog(ERROR, "enum value %u not found in cache for enum %s",
    2392             :                  arg1, format_type_be(tcache->type_id));
    2393           0 :         if (item2 == NULL)
    2394           0 :             elog(ERROR, "enum value %u not found in cache for enum %s",
    2395             :                  arg2, format_type_be(tcache->type_id));
    2396             :     }
    2397             : 
    2398      150052 :     if (item1->sort_order < item2->sort_order)
    2399       50016 :         return -1;
    2400      100036 :     else if (item1->sort_order > item2->sort_order)
    2401      100036 :         return 1;
    2402             :     else
    2403           0 :         return 0;
    2404             : }
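
/*
 * A simplified sketch of a caller: an enum comparison routine can obtain the
 * typcache entry for the enum type and delegate to compare_values_of_enum().
 * PostgreSQL's actual enum comparison code also caches the TypeCacheEntry in
 * fn_extra and applies the OID-parity fast path mentioned above; both are
 * omitted here, and the function name is hypothetical.
 */
static int
compare_enum_oids(Oid enumtypoid, Oid a, Oid b)
{
    TypeCacheEntry *tcache;

    if (a == b)
        return 0;

    tcache = lookup_type_cache(enumtypoid, 0);
    return compare_values_of_enum(tcache, a, b);
}
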
    2405             : 
    2406             : /*
    2407             :  * Load (or re-load) the enumData member of the typcache entry.
    2408             :  */
    2409             : static void
    2410           6 : load_enum_cache_data(TypeCacheEntry *tcache)
    2411             : {
    2412             :     TypeCacheEnumData *enumdata;
    2413             :     Relation    enum_rel;
    2414             :     SysScanDesc enum_scan;
    2415             :     HeapTuple   enum_tuple;
    2416             :     ScanKeyData skey;
    2417             :     EnumItem   *items;
    2418             :     int         numitems;
    2419             :     int         maxitems;
    2420             :     Oid         bitmap_base;
    2421             :     Bitmapset  *bitmap;
    2422             :     MemoryContext oldcxt;
    2423             :     int         bm_size,
    2424             :                 start_pos;
    2425             : 
    2426             :     /* Check that this is actually an enum */
    2427           6 :     if (tcache->typtype != TYPTYPE_ENUM)
    2428           0 :         ereport(ERROR,
    2429             :                 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
    2430             :                  errmsg("%s is not an enum",
    2431             :                         format_type_be(tcache->type_id))));
    2432             : 
    2433             :     /*
    2434             :      * Read all the information for members of the enum type.  We collect the
    2435             :      * info in working memory in the caller's context, and then transfer it to
    2436             :      * permanent memory in CacheMemoryContext.  This minimizes the risk of
    2437             :      * leaking memory from CacheMemoryContext in the event of an error partway
    2438             :      * through.
    2439             :      */
    2440           6 :     maxitems = 64;
    2441           6 :     items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
    2442           6 :     numitems = 0;
    2443             : 
    2444             :     /* Scan pg_enum for the members of the target enum type. */
    2445           6 :     ScanKeyInit(&skey,
    2446             :                 Anum_pg_enum_enumtypid,
    2447             :                 BTEqualStrategyNumber, F_OIDEQ,
    2448           6 :                 ObjectIdGetDatum(tcache->type_id));
    2449             : 
    2450           6 :     enum_rel = table_open(EnumRelationId, AccessShareLock);
    2451           6 :     enum_scan = systable_beginscan(enum_rel,
    2452             :                                    EnumTypIdLabelIndexId,
    2453             :                                    true, NULL,
    2454             :                                    1, &skey);
    2455             : 
    2456          46 :     while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
    2457             :     {
    2458          40 :         Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);
    2459             : 
    2460          40 :         if (numitems >= maxitems)
    2461             :         {
    2462           0 :             maxitems *= 2;
    2463           0 :             items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
    2464             :         }
    2465          40 :         items[numitems].enum_oid = en->oid;
    2466          40 :         items[numitems].sort_order = en->enumsortorder;
    2467          40 :         numitems++;
    2468             :     }
    2469             : 
    2470           6 :     systable_endscan(enum_scan);
    2471           6 :     table_close(enum_rel, AccessShareLock);
    2472             : 
    2473             :     /* Sort the items into OID order */
    2474           6 :     qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
    2475             : 
    2476             :     /*
    2477             :      * Here, we create a bitmap listing a subset of the enum's OIDs that are
    2478             :      * known to be in order and can thus be compared with just OID comparison.
    2479             :      *
    2480             :      * The point of this is that the enum's initial OIDs were certainly in
    2481             :      * order, so there is some subset that can be compared via OID comparison;
    2482             :      * and we'd rather not do binary searches unnecessarily.
    2483             :      *
    2484             :      * This is somewhat heuristic, and might identify a subset of OIDs that
    2485             :      * isn't exactly what the type started with.  That's okay as long as the
    2486             :      * subset is correctly sorted.
    2487             :      */
    2488           6 :     bitmap_base = InvalidOid;
    2489           6 :     bitmap = NULL;
    2490           6 :     bm_size = 1;                /* only save sets of at least 2 OIDs */
    2491             : 
    2492          14 :     for (start_pos = 0; start_pos < numitems - 1; start_pos++)
    2493             :     {
    2494             :         /*
    2495             :          * Identify longest sorted subsequence starting at start_pos
    2496             :          */
    2497          14 :         Bitmapset  *this_bitmap = bms_make_singleton(0);
    2498          14 :         int         this_bm_size = 1;
    2499          14 :         Oid         start_oid = items[start_pos].enum_oid;
    2500          14 :         float4      prev_order = items[start_pos].sort_order;
    2501             :         int         i;
    2502             : 
    2503          92 :         for (i = start_pos + 1; i < numitems; i++)
    2504             :         {
    2505             :             Oid         offset;
    2506             : 
    2507          78 :             offset = items[i].enum_oid - start_oid;
    2508             :             /* quit if bitmap would be too large; cutoff is arbitrary */
    2509          78 :             if (offset >= 8192)
    2510           0 :                 break;
    2511             :             /* include the item if it's in-order */
    2512          78 :             if (items[i].sort_order > prev_order)
    2513             :             {
    2514          40 :                 prev_order = items[i].sort_order;
    2515          40 :                 this_bitmap = bms_add_member(this_bitmap, (int) offset);
    2516          40 :                 this_bm_size++;
    2517             :             }
    2518             :         }
    2519             : 
    2520             :         /* Remember it if larger than previous best */
    2521          14 :         if (this_bm_size > bm_size)
    2522             :         {
    2523           6 :             bms_free(bitmap);
    2524           6 :             bitmap_base = start_oid;
    2525           6 :             bitmap = this_bitmap;
    2526           6 :             bm_size = this_bm_size;
    2527             :         }
    2528             :         else
    2529           8 :             bms_free(this_bitmap);
    2530             : 
    2531             :         /*
    2532             :          * Done if it's not possible to find a longer sequence in the rest of
    2533             :          * the list.  In typical cases this will happen on the first
    2534             :          * iteration, which is why we create the bitmaps on the fly instead of
    2535             :          * doing a second pass over the list.
    2536             :          */
    2537          14 :         if (bm_size >= (numitems - start_pos - 1))
    2538           6 :             break;
    2539             :     }
    2540             : 
    2541             :     /* OK, copy the data into CacheMemoryContext */
    2542           6 :     oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
    2543             :     enumdata = (TypeCacheEnumData *)
    2544           6 :         palloc(offsetof(TypeCacheEnumData, enum_values) +
    2545           6 :                numitems * sizeof(EnumItem));
    2546           6 :     enumdata->bitmap_base = bitmap_base;
    2547           6 :     enumdata->sorted_values = bms_copy(bitmap);
    2548           6 :     enumdata->num_values = numitems;
    2549           6 :     memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
    2550           6 :     MemoryContextSwitchTo(oldcxt);
    2551             : 
    2552           6 :     pfree(items);
    2553           6 :     bms_free(bitmap);
    2554             : 
    2555             :     /* And link the finished cache struct into the typcache */
    2556           6 :     if (tcache->enumData != NULL)
    2557           0 :         pfree(tcache->enumData);
    2558           6 :     tcache->enumData = enumdata;
    2559           6 : }
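
/*
 * A worked example of the bitmap heuristic above, with illustrative OIDs.
 * Suppose the enum was created with three members and a fourth was added
 * later with ALTER TYPE ... ADD VALUE BEFORE, so that pg_enum holds, in OID
 * order:
 *
 *     enum_oid    8000   8001   8002   9000
 *     sort_order   1.0    2.0    3.0    2.5
 *
 * Starting at position 0, the longest in-order run is {8000, 8001, 8002}, so
 * bitmap_base becomes 8000 and the bitmap holds offsets {0, 1, 2}; the loop
 * then stops because no longer run can start later in the list.  Comparisons
 * among the first three values can use plain OID comparison via
 * enum_known_sorted(), while any comparison involving 9000 falls back to the
 * binary-search path in compare_values_of_enum().
 */
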
    2560             : 
    2561             : /*
    2562             :  * Locate the EnumItem with the given OID, if present
    2563             :  */
    2564             : static EnumItem *
    2565      300104 : find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
    2566             : {
    2567             :     EnumItem    srch;
    2568             : 
    2569             :     /* On some versions of Solaris, bsearch of zero items dumps core */
    2570      300104 :     if (enumdata->num_values <= 0)
    2571           0 :         return NULL;
    2572             : 
    2573      300104 :     srch.enum_oid = arg;
    2574      300104 :     return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
    2575             :                    sizeof(EnumItem), enum_oid_cmp);
    2576             : }
    2577             : 
    2578             : /*
    2579             :  * qsort comparison function for OID-ordered EnumItems
    2580             :  */
    2581             : static int
    2582      600360 : enum_oid_cmp(const void *left, const void *right)
    2583             : {
    2584      600360 :     const EnumItem *l = (const EnumItem *) left;
    2585      600360 :     const EnumItem *r = (const EnumItem *) right;
    2586             : 
    2587      600360 :     if (l->enum_oid < r->enum_oid)
    2588      150132 :         return -1;
    2589      450228 :     else if (l->enum_oid > r->enum_oid)
    2590      150124 :         return 1;
    2591             :     else
    2592      300104 :         return 0;
    2593             : }
    2594             : 
    2595             : /*
    2596             :  * Copy 'tupdesc' into newly allocated shared memory in 'area', set its typmod
    2597             :  * to the given value and return a dsa_pointer.
    2598             :  */
    2599             : static dsa_pointer
    2600          36 : share_tupledesc(dsa_area *area, TupleDesc tupdesc, uint32 typmod)
    2601             : {
    2602             :     dsa_pointer shared_dp;
    2603             :     TupleDesc   shared;
    2604             : 
    2605          36 :     shared_dp = dsa_allocate(area, TupleDescSize(tupdesc));
    2606          36 :     shared = (TupleDesc) dsa_get_address(area, shared_dp);
    2607          36 :     TupleDescCopy(shared, tupdesc);
    2608          36 :     shared->tdtypmod = typmod;
    2609             : 
    2610          36 :     return shared_dp;
    2611             : }
    2612             : 
    2613             : /*
    2614             :  * If we are attached to a SharedRecordTypmodRegistry, use it to find or
    2615             :  * create a shared TupleDesc that matches 'tupdesc'.  Otherwise return NULL.
    2616             :  * Tuple descriptors returned by this function are not reference counted, and
    2617             :  * will exist at least as long as the current backend remains attached to the
    2618             :  * current session.
    2619             :  */
    2620             : static TupleDesc
    2621       13510 : find_or_make_matching_shared_tupledesc(TupleDesc tupdesc)
    2622             : {
    2623             :     TupleDesc   result;
    2624             :     SharedRecordTableKey key;
    2625             :     SharedRecordTableEntry *record_table_entry;
    2626             :     SharedTypmodTableEntry *typmod_table_entry;
    2627             :     dsa_pointer shared_dp;
    2628             :     bool        found;
    2629             :     uint32      typmod;
    2630             : 
    2631             :     /* If not even attached, nothing to do. */
    2632       13510 :     if (CurrentSession->shared_typmod_registry == NULL)
    2633       13466 :         return NULL;
    2634             : 
    2635             :     /* Try to find a matching tuple descriptor in the record table. */
    2636          44 :     key.shared = false;
    2637          44 :     key.u.local_tupdesc = tupdesc;
    2638             :     record_table_entry = (SharedRecordTableEntry *)
    2639          44 :         dshash_find(CurrentSession->shared_record_table, &key, false);
    2640          44 :     if (record_table_entry)
    2641             :     {
    2642             :         Assert(record_table_entry->key.shared);
    2643          12 :         dshash_release_lock(CurrentSession->shared_record_table,
    2644             :                             record_table_entry);
    2645             :         result = (TupleDesc)
    2646          12 :             dsa_get_address(CurrentSession->area,
    2647             :                             record_table_entry->key.u.shared_tupdesc);
    2648             :         Assert(result->tdrefcount == -1);
    2649             : 
    2650          12 :         return result;
    2651             :     }
    2652             : 
    2653             :     /* Allocate a new typmod number.  This will be wasted if we error out. */
    2654          32 :     typmod = (int)
    2655          32 :         pg_atomic_fetch_add_u32(&CurrentSession->shared_typmod_registry->next_typmod,
    2656             :                                 1);
    2657             : 
    2658             :     /* Copy the TupleDesc into shared memory. */
    2659          32 :     shared_dp = share_tupledesc(CurrentSession->area, tupdesc, typmod);
    2660             : 
    2661             :     /*
    2662             :      * Create an entry in the typmod table so that others will understand this
    2663             :      * typmod number.
    2664             :      */
    2665          32 :     PG_TRY();
    2666             :     {
    2667             :         typmod_table_entry = (SharedTypmodTableEntry *)
    2668          32 :             dshash_find_or_insert(CurrentSession->shared_typmod_table,
    2669             :                                   &typmod, &found);
    2670          32 :         if (found)
    2671           0 :             elog(ERROR, "cannot create duplicate shared record typmod");
    2672             :     }
    2673           0 :     PG_CATCH();
    2674             :     {
    2675           0 :         dsa_free(CurrentSession->area, shared_dp);
    2676           0 :         PG_RE_THROW();
    2677             :     }
    2678          32 :     PG_END_TRY();
    2679          32 :     typmod_table_entry->typmod = typmod;
    2680          32 :     typmod_table_entry->shared_tupdesc = shared_dp;
    2681          32 :     dshash_release_lock(CurrentSession->shared_typmod_table,
    2682             :                         typmod_table_entry);
    2683             : 
    2684             :     /*
    2685             :      * Finally create an entry in the record table so others with matching
    2686             :      * tuple descriptors can reuse the typmod.
    2687             :      */
    2688             :     record_table_entry = (SharedRecordTableEntry *)
    2689          32 :         dshash_find_or_insert(CurrentSession->shared_record_table, &key,
    2690             :                               &found);
    2691          32 :     if (found)
    2692             :     {
    2693             :         /*
    2694             :          * Someone concurrently inserted a matching tuple descriptor since the
    2695             :          * first time we checked.  Use that one instead.
    2696             :          */
    2697           0 :         dshash_release_lock(CurrentSession->shared_record_table,
    2698             :                             record_table_entry);
    2699             : 
    2700             :         /* Might as well free up the space used by the one we created. */
    2701           0 :         found = dshash_delete_key(CurrentSession->shared_typmod_table,
    2702             :                                   &typmod);
    2703             :         Assert(found);
    2704           0 :         dsa_free(CurrentSession->area, shared_dp);
    2705             : 
    2706             :         /* Return the one we found. */
    2707             :         Assert(record_table_entry->key.shared);
    2708             :         result = (TupleDesc)
    2709           0 :             dsa_get_address(CurrentSession->area,
    2710           0 :                             record_table_entry->key.u.shared_tupdesc);
    2711             :         Assert(result->tdrefcount == -1);
    2712             : 
    2713           0 :         return result;
    2714             :     }
    2715             : 
    2716             :     /* Store it and return it. */
    2717          32 :     record_table_entry->key.shared = true;
    2718          32 :     record_table_entry->key.u.shared_tupdesc = shared_dp;
    2719          32 :     dshash_release_lock(CurrentSession->shared_record_table,
    2720             :                         record_table_entry);
    2721             :     result = (TupleDesc)
    2722          32 :         dsa_get_address(CurrentSession->area, shared_dp);
    2723             :     Assert(result->tdrefcount == -1);
    2724             : 
    2725          32 :     return result;
    2726             : }
    2727             : 
    2728             : /*
    2729             :  * On-DSM-detach hook to forget about the current shared record typmod
    2730             :  * infrastructure.  This is currently used by both leader and workers.
    2731             :  */
    2732             : static void
    2733        1630 : shared_record_typmod_registry_detach(dsm_segment *segment, Datum datum)
    2734             : {
    2735             :     /* Be cautious here: maybe we didn't finish initializing. */
    2736        1630 :     if (CurrentSession->shared_record_table != NULL)
    2737             :     {
    2738        1630 :         dshash_detach(CurrentSession->shared_record_table);
    2739        1630 :         CurrentSession->shared_record_table = NULL;
    2740             :     }
    2741        1630 :     if (CurrentSession->shared_typmod_table != NULL)
    2742             :     {
    2743        1630 :         dshash_detach(CurrentSession->shared_typmod_table);
    2744        1630 :         CurrentSession->shared_typmod_table = NULL;
    2745             :     }
    2746        1630 :     CurrentSession->shared_typmod_registry = NULL;
    2747        1630 : }

Generated by: LCOV version 1.13