LCOV - code coverage report
Current view: top level - src/backend/executor - execPartition.c (source / functions)
Test:         PostgreSQL 19devel
Date:         2025-12-12 01:18:21

                  Hit    Total    Coverage
Lines:            672    704      95.5 %
Functions:        19     19       100.0 %

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * execPartition.c
       4             :  *    Support routines for partitioning.
       5             :  *
       6             :  * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  * IDENTIFICATION
      10             :  *    src/backend/executor/execPartition.c
      11             :  *
      12             :  *-------------------------------------------------------------------------
      13             :  */
      14             : #include "postgres.h"
      15             : 
      16             : #include "access/table.h"
      17             : #include "access/tableam.h"
      18             : #include "catalog/index.h"
      19             : #include "catalog/partition.h"
      20             : #include "executor/execPartition.h"
      21             : #include "executor/executor.h"
      22             : #include "executor/nodeModifyTable.h"
      23             : #include "foreign/fdwapi.h"
      24             : #include "mb/pg_wchar.h"
      25             : #include "miscadmin.h"
      26             : #include "partitioning/partbounds.h"
      27             : #include "partitioning/partdesc.h"
      28             : #include "partitioning/partprune.h"
      29             : #include "rewrite/rewriteManip.h"
      30             : #include "utils/acl.h"
      31             : #include "utils/lsyscache.h"
      32             : #include "utils/partcache.h"
      33             : #include "utils/rls.h"
      34             : #include "utils/ruleutils.h"
      35             : 
      36             : 
      37             : /*-----------------------
      38             :  * PartitionTupleRouting - Encapsulates all information required to
      39             :  * route a tuple inserted into a partitioned table to one of its leaf
      40             :  * partitions.
      41             :  *
      42             :  * partition_root
      43             :  *      The partitioned table that's the target of the command.
      44             :  *
      45             :  * partition_dispatch_info
      46             :  *      Array of 'max_dispatch' elements containing a pointer to a
      47             :  *      PartitionDispatch object for every partitioned table touched by tuple
      48             :  *      routing.  The entry for the target partitioned table is *always*
      49             :  *      present in the 0th element of this array.  See comment for
      50             :  *      PartitionDispatchData->indexes for details on how this array is
      51             :  *      indexed.
      52             :  *
      53             :  * nonleaf_partitions
      54             :  *      Array of 'max_dispatch' elements containing pointers to fake
      55             :  *      ResultRelInfo objects for nonleaf partitions, useful for checking
      56             :  *      the partition constraint.
      57             :  *
      58             :  * num_dispatch
      59             :  *      The current number of items stored in the 'partition_dispatch_info'
      60             :  *      array.  Also serves as the index of the next free array element for
      61             :  *      new PartitionDispatch objects that need to be stored.
      62             :  *
      63             :  * max_dispatch
      64             :  *      The current allocated size of the 'partition_dispatch_info' array.
      65             :  *
      66             :  * partitions
      67             :  *      Array of 'max_partitions' elements containing a pointer to a
      68             :  *      ResultRelInfo for every leaf partition touched by tuple routing.
      69             :  *      Some of these are pointers to ResultRelInfos which are borrowed out of
      70             :  *      the owning ModifyTableState node.  The remainder have been built
      71             :  *      especially for tuple routing.  See comment for
      72             :  *      PartitionDispatchData->indexes for details on how this array is
      73             :  *      indexed.
      74             :  *
      75             :  * is_borrowed_rel
      76             :  *      Array of 'max_partitions' booleans recording whether a given entry
      77             :  *      in 'partitions' is a ResultRelInfo pointer borrowed from the owning
      78             :  *      ModifyTableState node, rather than being built here.
      79             :  *
      80             :  * num_partitions
      81             :  *      The current number of items stored in the 'partitions' array.  Also
      82             :  *      serves as the index of the next free array element for new
      83             :  *      ResultRelInfo objects that need to be stored.
      84             :  *
      85             :  * max_partitions
      86             :  *      The current allocated size of the 'partitions' array.
      87             :  *
      88             :  * memcxt
      89             :  *      Memory context used to allocate subsidiary structs.
      90             :  *-----------------------
      91             :  */
      92             : struct PartitionTupleRouting
      93             : {
      94             :     Relation    partition_root;
      95             :     PartitionDispatch *partition_dispatch_info;
      96             :     ResultRelInfo **nonleaf_partitions;
      97             :     int         num_dispatch;
      98             :     int         max_dispatch;
      99             :     ResultRelInfo **partitions;
     100             :     bool       *is_borrowed_rel;
     101             :     int         num_partitions;
     102             :     int         max_partitions;
     103             :     MemoryContext memcxt;
     104             : };
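
/*
 * A minimal sketch (not part of execPartition.c) of how the num_/max_
 * counters above drive the doubling arrays.  The helper name is invented
 * for illustration; the real growth logic lives in the routing-setup
 * routines later in this file, and allocations are assumed to happen while
 * proute->memcxt is the current memory context.
 */
static void
grow_partition_arrays(PartitionTupleRouting *proute)
{
    if (proute->num_partitions < proute->max_partitions)
        return;                 /* still room for another entry */

    if (proute->max_partitions == 0)
    {
        proute->max_partitions = 8;
        proute->partitions = (ResultRelInfo **)
            palloc(sizeof(ResultRelInfo *) * proute->max_partitions);
        proute->is_borrowed_rel = (bool *)
            palloc(sizeof(bool) * proute->max_partitions);
    }
    else
    {
        proute->max_partitions *= 2;
        proute->partitions = (ResultRelInfo **)
            repalloc(proute->partitions,
                     sizeof(ResultRelInfo *) * proute->max_partitions);
        proute->is_borrowed_rel = (bool *)
            repalloc(proute->is_borrowed_rel,
                     sizeof(bool) * proute->max_partitions);
    }
}
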
     105             : 
     106             : /*-----------------------
     107             :  * PartitionDispatch - information about one partitioned table in a partition
     108             :  * hierarchy required to route a tuple to any of its partitions.  A
     109             :  * PartitionDispatch is always encapsulated inside a PartitionTupleRouting
     110             :  * struct and stored inside its 'partition_dispatch_info' array.
     111             :  *
     112             :  * reldesc
     113             :  *      Relation descriptor of the table
     114             :  *
     115             :  * key
     116             :  *      Partition key information of the table
     117             :  *
     118             :  * keystate
     119             :  *      Execution state required for expressions in the partition key
     120             :  *
     121             :  * partdesc
     122             :  *      Partition descriptor of the table
     123             :  *
     124             :  * tupslot
     125             :  *      A standalone TupleTableSlot initialized with this table's tuple
      126             :  *      descriptor, or NULL if no tuple conversion from the parent is
     127             :  *      required.
     128             :  *
     129             :  * tupmap
     130             :  *      TupleConversionMap to convert from the parent's rowtype to this table's
     131             :  *      rowtype  (when extracting the partition key of a tuple just before
     132             :  *      routing it through this table). A NULL value is stored if no tuple
     133             :  *      conversion is required.
     134             :  *
     135             :  * indexes
     136             :  *      Array of partdesc->nparts elements.  For leaf partitions the index
     137             :  *      corresponds to the partition's ResultRelInfo in the encapsulating
     138             :  *      PartitionTupleRouting's partitions array.  For partitioned partitions,
     139             :  *      the index corresponds to the PartitionDispatch for it in its
     140             :  *      partition_dispatch_info array.  -1 indicates we've not yet allocated
     141             :  *      anything in PartitionTupleRouting for the partition.
     142             :  *-----------------------
     143             :  */
     144             : typedef struct PartitionDispatchData
     145             : {
     146             :     Relation    reldesc;
     147             :     PartitionKey key;
     148             :     List       *keystate;       /* list of ExprState */
     149             :     PartitionDesc partdesc;
     150             :     TupleTableSlot *tupslot;
     151             :     AttrMap    *tupmap;
     152             :     int         indexes[FLEXIBLE_ARRAY_MEMBER];
     153             : }           PartitionDispatchData;
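
/*
 * Because 'indexes' is a flexible array member, each PartitionDispatchData
 * is allocated with room for one int per partition.  A simplified sketch,
 * assuming 'partdesc' has already been looked up for the table (the actual
 * setup happens in ExecInitPartitionDispatchInfo below):
 */
    PartitionDispatch pd;

    pd = (PartitionDispatch) palloc(offsetof(PartitionDispatchData, indexes) +
                                    partdesc->nparts * sizeof(int));
    /* -1 everywhere: nothing allocated in PartitionTupleRouting yet */
    memset(pd->indexes, -1, sizeof(int) * partdesc->nparts);
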
     154             : 
     155             : 
     156             : static ResultRelInfo *ExecInitPartitionInfo(ModifyTableState *mtstate,
     157             :                                             EState *estate, PartitionTupleRouting *proute,
     158             :                                             PartitionDispatch dispatch,
     159             :                                             ResultRelInfo *rootResultRelInfo,
     160             :                                             int partidx);
     161             : static void ExecInitRoutingInfo(ModifyTableState *mtstate,
     162             :                                 EState *estate,
     163             :                                 PartitionTupleRouting *proute,
     164             :                                 PartitionDispatch dispatch,
     165             :                                 ResultRelInfo *partRelInfo,
     166             :                                 int partidx,
     167             :                                 bool is_borrowed_rel);
     168             : static PartitionDispatch ExecInitPartitionDispatchInfo(EState *estate,
     169             :                                                        PartitionTupleRouting *proute,
     170             :                                                        Oid partoid, PartitionDispatch parent_pd,
     171             :                                                        int partidx, ResultRelInfo *rootResultRelInfo);
     172             : static void FormPartitionKeyDatum(PartitionDispatch pd,
     173             :                                   TupleTableSlot *slot,
     174             :                                   EState *estate,
     175             :                                   Datum *values,
     176             :                                   bool *isnull);
     177             : static int  get_partition_for_tuple(PartitionDispatch pd, const Datum *values,
     178             :                                     const bool *isnull);
     179             : static char *ExecBuildSlotPartitionKeyDescription(Relation rel,
     180             :                                                   const Datum *values,
     181             :                                                   const bool *isnull,
     182             :                                                   int maxfieldlen);
     183             : static List *adjust_partition_colnos(List *colnos, ResultRelInfo *leaf_part_rri);
     184             : static List *adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap);
     185             : static PartitionPruneState *CreatePartitionPruneState(EState *estate,
     186             :                                                       PartitionPruneInfo *pruneinfo,
     187             :                                                       Bitmapset **all_leafpart_rtis);
     188             : static void InitPartitionPruneContext(PartitionPruneContext *context,
     189             :                                       List *pruning_steps,
     190             :                                       PartitionDesc partdesc,
     191             :                                       PartitionKey partkey,
     192             :                                       PlanState *planstate,
     193             :                                       ExprContext *econtext);
     194             : static void InitExecPartitionPruneContexts(PartitionPruneState *prunestate,
     195             :                                            PlanState *parent_plan,
     196             :                                            Bitmapset *initially_valid_subplans,
     197             :                                            int n_total_subplans);
     198             : static void find_matching_subplans_recurse(PartitionPruningData *prunedata,
     199             :                                            PartitionedRelPruningData *pprune,
     200             :                                            bool initial_prune,
     201             :                                            Bitmapset **validsubplans,
     202             :                                            Bitmapset **validsubplan_rtis);
     203             : 
     204             : 
     205             : /*
     206             :  * ExecSetupPartitionTupleRouting - sets up information needed during
     207             :  * tuple routing for partitioned tables, encapsulates it in
     208             :  * PartitionTupleRouting, and returns it.
     209             :  *
     210             :  * Callers must use the returned PartitionTupleRouting during calls to
     211             :  * ExecFindPartition().  The actual ResultRelInfo for a partition is only
     212             :  * allocated when the partition is found for the first time.
     213             :  *
     214             :  * The current memory context is used to allocate this struct and all
     215             :  * subsidiary structs that will be allocated from it later on.  Typically
     216             :  * it should be estate->es_query_cxt.
     217             :  */
     218             : PartitionTupleRouting *
     219        7172 : ExecSetupPartitionTupleRouting(EState *estate, Relation rel)
     220             : {
     221             :     PartitionTupleRouting *proute;
     222             : 
     223             :     /*
     224             :      * Here we attempt to expend as little effort as possible in setting up
     225             :      * the PartitionTupleRouting.  Each partition's ResultRelInfo is built on
     226             :      * demand, only when we actually need to route a tuple to that partition.
     227             :      * The reason for this is that a common case is for INSERT to insert a
     228             :      * single tuple into a partitioned table and this must be fast.
     229             :      */
     230        7172 :     proute = palloc0_object(PartitionTupleRouting);
     231        7172 :     proute->partition_root = rel;
     232        7172 :     proute->memcxt = CurrentMemoryContext;
     233             :     /* Rest of members initialized by zeroing */
     234             : 
     235             :     /*
     236             :      * Initialize this table's PartitionDispatch object.  Here we pass in the
     237             :      * parent as NULL as we don't need to care about any parent of the target
     238             :      * partitioned table.
     239             :      */
     240        7172 :     ExecInitPartitionDispatchInfo(estate, proute, RelationGetRelid(rel),
     241             :                                   NULL, 0, NULL);
     242             : 
     243        7172 :     return proute;
     244             : }
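
/*
 * A minimal caller-side sketch of this API, assuming estate, mtstate,
 * rootResultRelInfo, the root partitioned Relation 'rootRelation' and the
 * tuple-bearing 'slot' are already set up as they would be in a ModifyTable
 * node; nodeModifyTable.c is the typical real caller.
 */
    PartitionTupleRouting *proute;
    ResultRelInfo *partRelInfo;

    /* once per statement, in the query memory context */
    proute = ExecSetupPartitionTupleRouting(estate, rootRelation);

    /* per tuple: find (lazily initializing) the target leaf partition */
    partRelInfo = ExecFindPartition(mtstate, rootResultRelInfo, proute,
                                    slot, estate);

    /* ... insert the tuple into partRelInfo's relation ... */

    /* once all tuples are processed, release tuple-routing resources */
    ExecCleanupTupleRouting(mtstate, proute);
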
     245             : 
     246             : /*
     247             :  * ExecFindPartition -- Return the ResultRelInfo for the leaf partition that
     248             :  * the tuple contained in *slot should belong to.
     249             :  *
     250             :  * If the partition's ResultRelInfo does not yet exist in 'proute' then we set
     251             :  * one up or reuse one from mtstate's resultRelInfo array.  When reusing a
     252             :  * ResultRelInfo from the mtstate we verify that the relation is a valid
     253             :  * target for INSERTs and initialize tuple routing information.
     254             :  *
     255             :  * rootResultRelInfo is the relation named in the query.
     256             :  *
     257             :  * estate must be non-NULL; we'll need it to compute any expressions in the
     258             :  * partition keys.  Also, its per-tuple contexts are used as evaluation
     259             :  * scratch space.
     260             :  *
     261             :  * If no leaf partition is found, this routine errors out with the appropriate
     262             :  * error message.  An error may also be raised if the found target partition
     263             :  * is not a valid target for an INSERT.
     264             :  */
     265             : ResultRelInfo *
     266     1033684 : ExecFindPartition(ModifyTableState *mtstate,
     267             :                   ResultRelInfo *rootResultRelInfo,
     268             :                   PartitionTupleRouting *proute,
     269             :                   TupleTableSlot *slot, EState *estate)
     270             : {
     271     1033684 :     PartitionDispatch *pd = proute->partition_dispatch_info;
     272             :     Datum       values[PARTITION_MAX_KEYS];
     273             :     bool        isnull[PARTITION_MAX_KEYS];
     274             :     Relation    rel;
     275             :     PartitionDispatch dispatch;
     276             :     PartitionDesc partdesc;
     277     1033684 :     ExprContext *ecxt = GetPerTupleExprContext(estate);
     278     1033684 :     TupleTableSlot *ecxt_scantuple_saved = ecxt->ecxt_scantuple;
     279     1033684 :     TupleTableSlot *rootslot = slot;
     280     1033684 :     TupleTableSlot *myslot = NULL;
     281             :     MemoryContext oldcxt;
     282     1033684 :     ResultRelInfo *rri = NULL;
     283             : 
     284             :     /* use per-tuple context here to avoid leaking memory */
     285     1033684 :     oldcxt = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
     286             : 
     287             :     /*
     288             :      * First check the root table's partition constraint, if any.  No point in
     289             :      * routing the tuple if it doesn't belong in the root table itself.
     290             :      */
     291     1033684 :     if (rootResultRelInfo->ri_RelationDesc->rd_rel->relispartition)
     292        4502 :         ExecPartitionCheck(rootResultRelInfo, slot, estate, true);
     293             : 
     294             :     /* start with the root partitioned table */
     295     1033652 :     dispatch = pd[0];
     296     2183608 :     while (dispatch != NULL)
     297             :     {
     298     1150148 :         int         partidx = -1;
     299             :         bool        is_leaf;
     300             : 
     301     1150148 :         CHECK_FOR_INTERRUPTS();
     302             : 
     303     1150148 :         rel = dispatch->reldesc;
     304     1150148 :         partdesc = dispatch->partdesc;
     305             : 
     306             :         /*
     307             :          * Extract partition key from tuple. Expression evaluation machinery
     308             :          * that FormPartitionKeyDatum() invokes expects ecxt_scantuple to
     309             :          * point to the correct tuple slot.  The slot might have changed from
     310             :          * what was used for the parent table if the table of the current
     311             :          * partitioning level has different tuple descriptor from the parent.
     312             :          * So update ecxt_scantuple accordingly.
     313             :          */
     314     1150148 :         ecxt->ecxt_scantuple = slot;
     315     1150148 :         FormPartitionKeyDatum(dispatch, slot, estate, values, isnull);
     316             : 
     317             :         /*
     318             :          * If this partitioned table has no partitions or no partition for
     319             :          * these values, error out.
     320             :          */
     321     2300242 :         if (partdesc->nparts == 0 ||
     322     1150106 :             (partidx = get_partition_for_tuple(dispatch, values, isnull)) < 0)
     323             :         {
     324             :             char       *val_desc;
     325             : 
     326         154 :             val_desc = ExecBuildSlotPartitionKeyDescription(rel,
     327             :                                                             values, isnull, 64);
     328             :             Assert(OidIsValid(RelationGetRelid(rel)));
     329         154 :             ereport(ERROR,
     330             :                     (errcode(ERRCODE_CHECK_VIOLATION),
     331             :                      errmsg("no partition of relation \"%s\" found for row",
     332             :                             RelationGetRelationName(rel)),
     333             :                      val_desc ?
     334             :                      errdetail("Partition key of the failing row contains %s.",
     335             :                                val_desc) : 0,
     336             :                      errtable(rel)));
     337             :         }
     338             : 
     339     1149982 :         is_leaf = partdesc->is_leaf[partidx];
     340     1149982 :         if (is_leaf)
     341             :         {
     342             :             /*
     343             :              * We've reached the leaf -- hurray, we're done.  Look to see if
     344             :              * we've already got a ResultRelInfo for this partition.
     345             :              */
     346     1033484 :             if (likely(dispatch->indexes[partidx] >= 0))
     347             :             {
     348             :                 /* ResultRelInfo already built */
     349             :                 Assert(dispatch->indexes[partidx] < proute->num_partitions);
     350     1024440 :                 rri = proute->partitions[dispatch->indexes[partidx]];
     351             :             }
     352             :             else
     353             :             {
     354             :                 /*
     355             :                  * If the partition is known in the owning ModifyTableState
     356             :                  * node, we can re-use that ResultRelInfo instead of creating
     357             :                  * a new one with ExecInitPartitionInfo().
     358             :                  */
     359        9044 :                 rri = ExecLookupResultRelByOid(mtstate,
     360        9044 :                                                partdesc->oids[partidx],
     361             :                                                true, false);
     362        9044 :                 if (rri)
     363             :                 {
     364         508 :                     ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
     365             : 
     366             :                     /* Verify this ResultRelInfo allows INSERTs */
     367         508 :                     CheckValidResultRel(rri, CMD_INSERT,
     368             :                                         node ? node->onConflictAction : ONCONFLICT_NONE,
     369             :                                         NIL);
     370             : 
     371             :                     /*
     372             :                      * Initialize information needed to insert this and
     373             :                      * subsequent tuples routed to this partition.
     374             :                      */
     375         508 :                     ExecInitRoutingInfo(mtstate, estate, proute, dispatch,
     376             :                                         rri, partidx, true);
     377             :                 }
     378             :                 else
     379             :                 {
     380             :                     /* We need to create a new one. */
     381        8536 :                     rri = ExecInitPartitionInfo(mtstate, estate, proute,
     382             :                                                 dispatch,
     383             :                                                 rootResultRelInfo, partidx);
     384             :                 }
     385             :             }
     386             :             Assert(rri != NULL);
     387             : 
     388             :             /* Signal to terminate the loop */
     389     1033460 :             dispatch = NULL;
     390             :         }
     391             :         else
     392             :         {
     393             :             /*
     394             :              * Partition is a sub-partitioned table; get the PartitionDispatch
     395             :              */
     396      116498 :             if (likely(dispatch->indexes[partidx] >= 0))
     397             :             {
     398             :                 /* Already built. */
     399             :                 Assert(dispatch->indexes[partidx] < proute->num_dispatch);
     400             : 
     401      115310 :                 rri = proute->nonleaf_partitions[dispatch->indexes[partidx]];
     402             : 
     403             :                 /*
     404             :                  * Move down to the next partition level and search again
     405             :                  * until we find a leaf partition that matches this tuple
     406             :                  */
     407      115310 :                 dispatch = pd[dispatch->indexes[partidx]];
     408             :             }
     409             :             else
     410             :             {
     411             :                 /* Not yet built. Do that now. */
     412             :                 PartitionDispatch subdispatch;
     413             : 
     414             :                 /*
     415             :                  * Create the new PartitionDispatch.  We pass the current one
     416             :                  * in as the parent PartitionDispatch
     417             :                  */
     418        1188 :                 subdispatch = ExecInitPartitionDispatchInfo(estate,
     419             :                                                             proute,
     420        1188 :                                                             partdesc->oids[partidx],
     421             :                                                             dispatch, partidx,
     422             :                                                             mtstate->rootResultRelInfo);
     423             :                 Assert(dispatch->indexes[partidx] >= 0 &&
     424             :                        dispatch->indexes[partidx] < proute->num_dispatch);
     425             : 
     426        1188 :                 rri = proute->nonleaf_partitions[dispatch->indexes[partidx]];
     427        1188 :                 dispatch = subdispatch;
     428             :             }
     429             : 
     430             :             /*
     431             :              * Convert the tuple to the new parent's layout, if different from
     432             :              * the previous parent.
     433             :              */
     434      116498 :             if (dispatch->tupslot)
     435             :             {
     436       61716 :                 AttrMap    *map = dispatch->tupmap;
     437       61716 :                 TupleTableSlot *tempslot = myslot;
     438             : 
     439       61716 :                 myslot = dispatch->tupslot;
     440       61716 :                 slot = execute_attr_map_slot(map, slot, myslot);
     441             : 
     442       61716 :                 if (tempslot != NULL)
     443         294 :                     ExecClearTuple(tempslot);
     444             :             }
     445             :         }
     446             : 
     447             :         /*
     448             :          * If this partition is the default one, we must check its partition
     449             :          * constraint now, which may have changed concurrently due to
     450             :          * partitions being added to the parent.
     451             :          *
     452             :          * (We do this here, and do not rely on ExecInsert doing it, because
     453             :          * we don't want to miss doing it for non-leaf partitions.)
     454             :          */
     455     1149958 :         if (partidx == partdesc->boundinfo->default_index)
     456             :         {
     457             :             /*
     458             :              * The tuple must match the partition's layout for the constraint
     459             :              * expression to be evaluated successfully.  If the partition is
     460             :              * sub-partitioned, that would already be the case due to the code
     461             :              * above, but for a leaf partition the tuple still matches the
     462             :              * parent's layout.
     463             :              *
     464             :              * Note that we have a map to convert from root to current
     465             :              * partition, but not from immediate parent to current partition.
     466             :              * So if we have to convert, do it from the root slot; if not, use
     467             :              * the root slot as-is.
     468             :              */
     469         596 :             if (is_leaf)
     470             :             {
     471         552 :                 TupleConversionMap *map = ExecGetRootToChildMap(rri, estate);
     472             : 
     473         552 :                 if (map)
     474         162 :                     slot = execute_attr_map_slot(map->attrMap, rootslot,
     475             :                                                  rri->ri_PartitionTupleSlot);
     476             :                 else
     477         390 :                     slot = rootslot;
     478             :             }
     479             : 
     480         596 :             ExecPartitionCheck(rri, slot, estate, true);
     481             :         }
     482             :     }
     483             : 
     484             :     /* Release the tuple in the lowest parent's dedicated slot. */
     485     1033460 :     if (myslot != NULL)
     486       61384 :         ExecClearTuple(myslot);
     487             :     /* and restore ecxt's scantuple */
     488     1033460 :     ecxt->ecxt_scantuple = ecxt_scantuple_saved;
     489     1033460 :     MemoryContextSwitchTo(oldcxt);
     490             : 
     491     1033460 :     return rri;
     492             : }
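
/*
 * The root-to-child slot conversion used above, shown in isolation as a
 * sketch; 'partRelInfo' stands for the routed-to partition's ResultRelInfo
 * and the local names are illustrative only.
 */
    TupleConversionMap *map = ExecGetRootToChildMap(partRelInfo, estate);
    TupleTableSlot *childslot;

    if (map != NULL)
        childslot = execute_attr_map_slot(map->attrMap, rootslot,
                                          partRelInfo->ri_PartitionTupleSlot);
    else
        childslot = rootslot;   /* identical layouts, no conversion needed */
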
     493             : 
     494             : /*
     495             :  * IsIndexCompatibleAsArbiter
     496             :  *      Return true if two indexes are identical for INSERT ON CONFLICT
     497             :  *      purposes.
     498             :  *
     499             :  * Only indexes of the same relation are supported.
     500             :  */
     501             : static bool
     502          24 : IsIndexCompatibleAsArbiter(Relation arbiterIndexRelation,
     503             :                            IndexInfo *arbiterIndexInfo,
     504             :                            Relation indexRelation,
     505             :                            IndexInfo *indexInfo)
     506             : {
     507             :     Assert(arbiterIndexRelation->rd_index->indrelid == indexRelation->rd_index->indrelid);
     508             : 
     509             :     /* must match whether they're unique */
     510          24 :     if (arbiterIndexInfo->ii_Unique != indexInfo->ii_Unique)
     511           0 :         return false;
     512             : 
     513             :     /* No support currently for comparing exclusion indexes. */
     514          24 :     if (arbiterIndexInfo->ii_ExclusionOps != NULL ||
     515          24 :         indexInfo->ii_ExclusionOps != NULL)
     516           0 :         return false;
     517             : 
     518             :     /* the "nulls not distinct" criterion must match */
     519          24 :     if (arbiterIndexInfo->ii_NullsNotDistinct !=
     520          24 :         indexInfo->ii_NullsNotDistinct)
     521           0 :         return false;
     522             : 
     523             :     /* number of key attributes must match */
     524          24 :     if (arbiterIndexInfo->ii_NumIndexKeyAttrs !=
     525          24 :         indexInfo->ii_NumIndexKeyAttrs)
     526           0 :         return false;
     527             : 
     528          36 :     for (int i = 0; i < arbiterIndexInfo->ii_NumIndexKeyAttrs; i++)
     529             :     {
     530          24 :         if (arbiterIndexRelation->rd_indcollation[i] !=
     531          24 :             indexRelation->rd_indcollation[i])
     532          12 :             return false;
     533             : 
     534          12 :         if (arbiterIndexRelation->rd_opfamily[i] !=
     535          12 :             indexRelation->rd_opfamily[i])
     536           0 :             return false;
     537             : 
     538          12 :         if (arbiterIndexRelation->rd_index->indkey.values[i] !=
     539          12 :             indexRelation->rd_index->indkey.values[i])
     540           0 :             return false;
     541             :     }
     542             : 
     543          12 :     if (list_difference(RelationGetIndexExpressions(arbiterIndexRelation),
     544          12 :                         RelationGetIndexExpressions(indexRelation)) != NIL)
     545           0 :         return false;
     546             : 
     547          12 :     if (list_difference(RelationGetIndexPredicate(arbiterIndexRelation),
     548          12 :                         RelationGetIndexPredicate(indexRelation)) != NIL)
     549           0 :         return false;
     550          12 :     return true;
     551             : }
     552             : 
     553             : /*
     554             :  * ExecInitPartitionInfo
     555             :  *      Lock the partition and initialize ResultRelInfo.  Also setup other
     556             :  *      information for the partition and store it in the next empty slot in
     557             :  *      the proute->partitions array.
     558             :  *
     559             :  * Returns the ResultRelInfo
     560             :  */
     561             : static ResultRelInfo *
     562        8536 : ExecInitPartitionInfo(ModifyTableState *mtstate, EState *estate,
     563             :                       PartitionTupleRouting *proute,
     564             :                       PartitionDispatch dispatch,
     565             :                       ResultRelInfo *rootResultRelInfo,
     566             :                       int partidx)
     567             : {
     568        8536 :     ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
     569        8536 :     Oid         partOid = dispatch->partdesc->oids[partidx];
     570             :     Relation    partrel;
     571        8536 :     int         firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
     572        8536 :     Relation    firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
     573             :     ResultRelInfo *leaf_part_rri;
     574             :     MemoryContext oldcxt;
     575        8536 :     AttrMap    *part_attmap = NULL;
     576             :     bool        found_whole_row;
     577             : 
     578        8536 :     oldcxt = MemoryContextSwitchTo(proute->memcxt);
     579             : 
     580        8536 :     partrel = table_open(partOid, RowExclusiveLock);
     581             : 
     582        8536 :     leaf_part_rri = makeNode(ResultRelInfo);
     583        8536 :     InitResultRelInfo(leaf_part_rri,
     584             :                       partrel,
     585             :                       0,
     586             :                       rootResultRelInfo,
     587             :                       estate->es_instrument);
     588             : 
     589             :     /*
     590             :      * Verify result relation is a valid target for an INSERT.  An UPDATE of a
     591             :      * partition-key becomes a DELETE+INSERT operation, so this check is still
     592             :      * required when the operation is CMD_UPDATE.
     593             :      */
     594        8536 :     CheckValidResultRel(leaf_part_rri, CMD_INSERT,
     595             :                         node ? node->onConflictAction : ONCONFLICT_NONE, NIL);
     596             : 
     597             :     /*
     598             :      * Open partition indices.  The user may have asked to check for conflicts
     599             :      * within this leaf partition and do "nothing" instead of throwing an
     600             :      * error.  Be prepared in that case by initializing the index information
     601             :      * needed by ExecInsert() to perform speculative insertions.
     602             :      */
     603        8524 :     if (partrel->rd_rel->relhasindex &&
     604        1934 :         leaf_part_rri->ri_IndexRelationDescs == NULL)
     605        1934 :         ExecOpenIndices(leaf_part_rri,
     606        3670 :                         (node != NULL &&
     607        1736 :                          node->onConflictAction != ONCONFLICT_NONE));
     608             : 
     609             :     /*
     610             :      * Build WITH CHECK OPTION constraints for the partition.  Note that we
     611             :      * didn't build the withCheckOptionList for partitions within the planner,
     612             :      * but simple translation of varattnos will suffice.  This only occurs for
     613             :      * the INSERT case or in the case of UPDATE/MERGE tuple routing where we
     614             :      * didn't find a result rel to reuse.
     615             :      */
     616        8524 :     if (node && node->withCheckOptionLists != NIL)
     617             :     {
     618             :         List       *wcoList;
     619          96 :         List       *wcoExprs = NIL;
     620             :         ListCell   *ll;
     621             : 
     622             :         /*
     623             :          * In the case of INSERT on a partitioned table, there is only one
     624             :          * plan.  Likewise, there is only one WCO list, not one per partition.
     625             :          * For UPDATE/MERGE, there are as many WCO lists as there are plans.
     626             :          */
     627             :         Assert((node->operation == CMD_INSERT &&
     628             :                 list_length(node->withCheckOptionLists) == 1 &&
     629             :                 list_length(node->resultRelations) == 1) ||
     630             :                (node->operation == CMD_UPDATE &&
     631             :                 list_length(node->withCheckOptionLists) ==
     632             :                 list_length(node->resultRelations)) ||
     633             :                (node->operation == CMD_MERGE &&
     634             :                 list_length(node->withCheckOptionLists) ==
     635             :                 list_length(node->resultRelations)));
     636             : 
     637             :         /*
     638             :          * Use the WCO list of the first plan as a reference to calculate
     639             :          * attno's for the WCO list of this partition.  In the INSERT case,
     640             :          * that refers to the root partitioned table, whereas in the UPDATE
     641             :          * tuple routing case, that refers to the first partition in the
     642             :          * mtstate->resultRelInfo array.  In any case, both that relation and
     643             :          * this partition should have the same columns, so we should be able
     644             :          * to map attributes successfully.
     645             :          */
     646          96 :         wcoList = linitial(node->withCheckOptionLists);
     647             : 
     648             :         /*
     649             :          * Convert Vars in it to contain this partition's attribute numbers.
     650             :          */
     651             :         part_attmap =
     652          96 :             build_attrmap_by_name(RelationGetDescr(partrel),
     653             :                                   RelationGetDescr(firstResultRel),
     654             :                                   false);
     655             :         wcoList = (List *)
     656          96 :             map_variable_attnos((Node *) wcoList,
     657             :                                 firstVarno, 0,
     658             :                                 part_attmap,
     659          96 :                                 RelationGetForm(partrel)->reltype,
     660             :                                 &found_whole_row);
     661             :         /* We ignore the value of found_whole_row. */
     662             : 
     663         270 :         foreach(ll, wcoList)
     664             :         {
     665         174 :             WithCheckOption *wco = lfirst_node(WithCheckOption, ll);
     666         174 :             ExprState  *wcoExpr = ExecInitQual(castNode(List, wco->qual),
     667             :                                                &mtstate->ps);
     668             : 
     669         174 :             wcoExprs = lappend(wcoExprs, wcoExpr);
     670             :         }
     671             : 
     672          96 :         leaf_part_rri->ri_WithCheckOptions = wcoList;
     673          96 :         leaf_part_rri->ri_WithCheckOptionExprs = wcoExprs;
     674             :     }
     675             : 
     676             :     /*
     677             :      * Build the RETURNING projection for the partition.  Note that we didn't
     678             :      * build the returningList for partitions within the planner, but simple
     679             :      * translation of varattnos will suffice.  This only occurs for the INSERT
     680             :      * case or in the case of UPDATE/MERGE tuple routing where we didn't find
     681             :      * a result rel to reuse.
     682             :      */
     683        8524 :     if (node && node->returningLists != NIL)
     684             :     {
     685             :         TupleTableSlot *slot;
     686             :         ExprContext *econtext;
     687             :         List       *returningList;
     688             : 
     689             :         /* See the comment above for WCO lists. */
     690             :         Assert((node->operation == CMD_INSERT &&
     691             :                 list_length(node->returningLists) == 1 &&
     692             :                 list_length(node->resultRelations) == 1) ||
     693             :                (node->operation == CMD_UPDATE &&
     694             :                 list_length(node->returningLists) ==
     695             :                 list_length(node->resultRelations)) ||
     696             :                (node->operation == CMD_MERGE &&
     697             :                 list_length(node->returningLists) ==
     698             :                 list_length(node->resultRelations)));
     699             : 
     700             :         /*
     701             :          * Use the RETURNING list of the first plan as a reference to
     702             :          * calculate attno's for the RETURNING list of this partition.  See
     703             :          * the comment above for WCO lists for more details on why this is
     704             :          * okay.
     705             :          */
     706         212 :         returningList = linitial(node->returningLists);
     707             : 
     708             :         /*
     709             :          * Convert Vars in it to contain this partition's attribute numbers.
     710             :          */
     711         212 :         if (part_attmap == NULL)
     712             :             part_attmap =
     713         212 :                 build_attrmap_by_name(RelationGetDescr(partrel),
     714             :                                       RelationGetDescr(firstResultRel),
     715             :                                       false);
     716             :         returningList = (List *)
     717         212 :             map_variable_attnos((Node *) returningList,
     718             :                                 firstVarno, 0,
     719             :                                 part_attmap,
     720         212 :                                 RelationGetForm(partrel)->reltype,
     721             :                                 &found_whole_row);
     722             :         /* We ignore the value of found_whole_row. */
     723             : 
     724         212 :         leaf_part_rri->ri_returningList = returningList;
     725             : 
     726             :         /*
     727             :          * Initialize the projection itself.
     728             :          *
     729             :          * Use the slot and the expression context that would have been set up
     730             :          * in ExecInitModifyTable() for projection's output.
     731             :          */
     732             :         Assert(mtstate->ps.ps_ResultTupleSlot != NULL);
     733         212 :         slot = mtstate->ps.ps_ResultTupleSlot;
     734             :         Assert(mtstate->ps.ps_ExprContext != NULL);
     735         212 :         econtext = mtstate->ps.ps_ExprContext;
     736         212 :         leaf_part_rri->ri_projectReturning =
     737         212 :             ExecBuildProjectionInfo(returningList, econtext, slot,
     738             :                                     &mtstate->ps, RelationGetDescr(partrel));
     739             :     }
     740             : 
     741             :     /* Set up information needed for routing tuples to the partition. */
     742        8524 :     ExecInitRoutingInfo(mtstate, estate, proute, dispatch,
     743             :                         leaf_part_rri, partidx, false);
     744             : 
     745             :     /*
     746             :      * If there is an ON CONFLICT clause, initialize state for it.
     747             :      */
     748        8524 :     if (node && node->onConflictAction != ONCONFLICT_NONE)
     749             :     {
     750         246 :         TupleDesc   partrelDesc = RelationGetDescr(partrel);
     751         246 :         ExprContext *econtext = mtstate->ps.ps_ExprContext;
     752         246 :         List       *arbiterIndexes = NIL;
     753         246 :         int         additional_arbiters = 0;
     754             : 
     755             :         /*
     756             :          * If there is a list of arbiter indexes, map it to a list of indexes
      757             :          * in the partition.  We also add any indexes identical to one of
      758             :          * those, to cover the case where an arbiter index is concurrently
      759             :          * being reindexed.
     760             :          */
     761         246 :         if (rootResultRelInfo->ri_onConflictArbiterIndexes != NIL)
     762             :         {
     763         190 :             List       *unparented_idxs = NIL,
     764         190 :                        *arbiters_listidxs = NIL;
     765             : 
     766         404 :             for (int listidx = 0; listidx < leaf_part_rri->ri_NumIndices; listidx++)
     767             :             {
     768             :                 Oid         indexoid;
     769             :                 List       *ancestors;
     770             : 
     771             :                 /*
     772             :                  * If one of this index's ancestors is in the root's arbiter
     773             :                  * list, then use this index as arbiter for this partition.
     774             :                  * Otherwise, if this index has no parent, track it for later,
     775             :                  * in case REINDEX CONCURRENTLY is working on one of the
     776             :                  * arbiters.
     777             :                  *
     778             :                  * XXX get_partition_ancestors is slow: it scans pg_inherits
     779             :                  * each time.  Consider a syscache or some other way to cache?
     780             :                  */
     781         214 :                 indexoid = RelationGetRelid(leaf_part_rri->ri_IndexRelationDescs[listidx]);
     782         214 :                 ancestors = get_partition_ancestors(indexoid);
     783         214 :                 if (ancestors != NIL)
     784             :                 {
     785         380 :                     foreach_oid(parent_idx, rootResultRelInfo->ri_onConflictArbiterIndexes)
     786             :                     {
     787         190 :                         if (list_member_oid(ancestors, parent_idx))
     788             :                         {
     789         190 :                             arbiterIndexes = lappend_oid(arbiterIndexes, indexoid);
     790         190 :                             arbiters_listidxs = lappend_int(arbiters_listidxs, listidx);
     791         190 :                             break;
     792             :                         }
     793             :                     }
     794             :                 }
     795             :                 else
     796          24 :                     unparented_idxs = lappend_int(unparented_idxs, listidx);
     797         214 :                 list_free(ancestors);
     798             :             }
     799             : 
     800             :             /*
     801             :              * If we found any indexes with no ancestors, it's possible that
     802             :              * some arbiter index is undergoing concurrent reindex.  Match all
     803             :              * unparented indexes against arbiters; add unparented matching
     804             :              * ones as "additional arbiters".
     805             :              *
     806             :              * This is critical so that all concurrent transactions use the
     807             :              * same set as arbiters during REINDEX CONCURRENTLY, to avoid
     808             :              * spurious "duplicate key" errors.
     809             :              */
     810         190 :             if (unparented_idxs && arbiterIndexes)
     811             :             {
     812          72 :                 foreach_int(unparented_i, unparented_idxs)
     813             :                 {
     814             :                     Relation    unparented_rel;
      815             :                     IndexInfo  *unparented_ii;
     816             : 
     817          24 :                     unparented_rel = leaf_part_rri->ri_IndexRelationDescs[unparented_i];
      818          24 :                     unparented_ii = leaf_part_rri->ri_IndexRelationInfo[unparented_i];
     819             : 
     820             :                     Assert(!list_member_oid(arbiterIndexes,
     821             :                                             unparented_rel->rd_index->indexrelid));
     822             : 
     823             :                     /* Ignore indexes not ready */
      824          24 :                     if (!unparented_ii->ii_ReadyForInserts)
     825           0 :                         continue;
     826             : 
     827          60 :                     foreach_int(arbiter_i, arbiters_listidxs)
     828             :                     {
     829             :                         Relation    arbiter_rel;
     830             :                         IndexInfo  *arbiter_ii;
     831             : 
     832          24 :                         arbiter_rel = leaf_part_rri->ri_IndexRelationDescs[arbiter_i];
     833          24 :                         arbiter_ii = leaf_part_rri->ri_IndexRelationInfo[arbiter_i];
     834             : 
     835             :                         /*
     836             :                          * If the non-ancestor index is compatible with the
     837             :                          * arbiter, use the non-ancestor as arbiter too.
     838             :                          */
     839          24 :                         if (IsIndexCompatibleAsArbiter(arbiter_rel,
     840             :                                                        arbiter_ii,
     841             :                                                        unparented_rel,
      842             :                                                        unparented_ii))
     843             :                         {
     844          12 :                             arbiterIndexes = lappend_oid(arbiterIndexes,
     845          12 :                                                          unparented_rel->rd_index->indexrelid);
     846          12 :                             additional_arbiters++;
     847          12 :                             break;
     848             :                         }
     849             :                     }
     850             :                 }
     851             :             }
     852         190 :             list_free(unparented_idxs);
     853         190 :             list_free(arbiters_listidxs);
     854             :         }
     855             : 
     856             :         /*
     857             :          * We expect to find as many arbiter indexes on this partition as the
     858             :          * root has, plus however many "additional arbiters" (to wit: those
     859             :          * being concurrently rebuilt) we found.
     860             :          */
     861         246 :         if (list_length(rootResultRelInfo->ri_onConflictArbiterIndexes) !=
     862         246 :             list_length(arbiterIndexes) - additional_arbiters)
     863           0 :             elog(ERROR, "invalid arbiter index list");
     864         246 :         leaf_part_rri->ri_onConflictArbiterIndexes = arbiterIndexes;
     865             : 
     866             :         /*
     867             :          * In the DO UPDATE case, we have some more state to initialize.
     868             :          */
     869         246 :         if (node->onConflictAction == ONCONFLICT_UPDATE)
     870             :         {
     871         178 :             OnConflictSetState *onconfl = makeNode(OnConflictSetState);
     872             :             TupleConversionMap *map;
     873             : 
     874         178 :             map = ExecGetRootToChildMap(leaf_part_rri, estate);
     875             : 
     876             :             Assert(node->onConflictSet != NIL);
     877             :             Assert(rootResultRelInfo->ri_onConflict != NULL);
     878             : 
     879         178 :             leaf_part_rri->ri_onConflict = onconfl;
     880             : 
     881             :             /*
     882             :              * Need a separate existing slot for each partition, as the
     883             :              * partition could be of a different AM, even if the tuple
     884             :              * descriptors match.
     885             :              */
     886         178 :             onconfl->oc_Existing =
     887         178 :                 table_slot_create(leaf_part_rri->ri_RelationDesc,
     888         178 :                                   &mtstate->ps.state->es_tupleTable);
     889             : 
     890             :             /*
     891             :              * If the partition's tuple descriptor matches exactly the root
     892             :              * parent (the common case), we can re-use most of the parent's ON
     893             :              * CONFLICT SET state, skipping a bunch of work.  Otherwise, we
     894             :              * need to create state specific to this partition.
     895             :              */
     896         178 :             if (map == NULL)
     897             :             {
     898             :                 /*
     899             :                  * It's safe to reuse these from the partition root, as we
     900             :                  * only process one tuple at a time (therefore we won't
     901             :                  * overwrite needed data in slots), and the results of
     902             :                  * projections are independent of the underlying storage.
      903             :                  * Projections and where clauses themselves don't store state
      904             :                  * and are independent of the underlying storage.
     905             :                  */
     906         102 :                 onconfl->oc_ProjSlot =
     907         102 :                     rootResultRelInfo->ri_onConflict->oc_ProjSlot;
     908         102 :                 onconfl->oc_ProjInfo =
     909         102 :                     rootResultRelInfo->ri_onConflict->oc_ProjInfo;
     910         102 :                 onconfl->oc_WhereClause =
     911         102 :                     rootResultRelInfo->ri_onConflict->oc_WhereClause;
     912             :             }
     913             :             else
     914             :             {
     915             :                 List       *onconflset;
     916             :                 List       *onconflcols;
     917             : 
     918             :                 /*
     919             :                  * Translate expressions in onConflictSet to account for
     920             :                  * different attribute numbers.  For that, map partition
     921             :                  * varattnos twice: first to catch the EXCLUDED
     922             :                  * pseudo-relation (INNER_VAR), and second to handle the main
     923             :                  * target relation (firstVarno).
     924             :                  */
     925          76 :                 onconflset = copyObject(node->onConflictSet);
     926          76 :                 if (part_attmap == NULL)
     927             :                     part_attmap =
     928          70 :                         build_attrmap_by_name(RelationGetDescr(partrel),
     929             :                                               RelationGetDescr(firstResultRel),
     930             :                                               false);
     931             :                 onconflset = (List *)
     932          76 :                     map_variable_attnos((Node *) onconflset,
     933             :                                         INNER_VAR, 0,
     934             :                                         part_attmap,
     935          76 :                                         RelationGetForm(partrel)->reltype,
     936             :                                         &found_whole_row);
     937             :                 /* We ignore the value of found_whole_row. */
     938             :                 onconflset = (List *)
     939          76 :                     map_variable_attnos((Node *) onconflset,
     940             :                                         firstVarno, 0,
     941             :                                         part_attmap,
     942          76 :                                         RelationGetForm(partrel)->reltype,
     943             :                                         &found_whole_row);
     944             :                 /* We ignore the value of found_whole_row. */
     945             : 
     946             :                 /* Finally, adjust the target colnos to match the partition. */
     947          76 :                 onconflcols = adjust_partition_colnos(node->onConflictCols,
     948             :                                                       leaf_part_rri);
     949             : 
     950             :                 /* create the tuple slot for the UPDATE SET projection */
     951          76 :                 onconfl->oc_ProjSlot =
     952          76 :                     table_slot_create(partrel,
     953          76 :                                       &mtstate->ps.state->es_tupleTable);
     954             : 
     955             :                 /* build UPDATE SET projection state */
     956          76 :                 onconfl->oc_ProjInfo =
     957          76 :                     ExecBuildUpdateProjection(onconflset,
     958             :                                               true,
     959             :                                               onconflcols,
     960             :                                               partrelDesc,
     961             :                                               econtext,
     962             :                                               onconfl->oc_ProjSlot,
     963             :                                               &mtstate->ps);
     964             : 
     965             :                 /*
     966             :                  * If there is a WHERE clause, initialize state where it will
     967             :                  * be evaluated, mapping the attribute numbers appropriately.
     968             :                  * As with onConflictSet, we need to map partition varattnos
     969             :                  * to the partition's tupdesc.
     970             :                  */
     971          76 :                 if (node->onConflictWhere)
     972             :                 {
     973             :                     List       *clause;
     974             : 
     975          30 :                     clause = copyObject((List *) node->onConflictWhere);
     976             :                     clause = (List *)
     977          30 :                         map_variable_attnos((Node *) clause,
     978             :                                             INNER_VAR, 0,
     979             :                                             part_attmap,
     980          30 :                                             RelationGetForm(partrel)->reltype,
     981             :                                             &found_whole_row);
     982             :                     /* We ignore the value of found_whole_row. */
     983             :                     clause = (List *)
     984          30 :                         map_variable_attnos((Node *) clause,
     985             :                                             firstVarno, 0,
     986             :                                             part_attmap,
     987          30 :                                             RelationGetForm(partrel)->reltype,
     988             :                                             &found_whole_row);
     989             :                     /* We ignore the value of found_whole_row. */
     990          30 :                     onconfl->oc_WhereClause =
     991          30 :                         ExecInitQual(clause, &mtstate->ps);
     992             :                 }
     993             :             }
     994             :         }
     995             :     }
     996             : 
     997             :     /*
     998             :      * Since we've just initialized this ResultRelInfo, it's not in any list
     999             :      * attached to the estate as yet.  Add it, so that it can be found later.
    1000             :      *
    1001             :      * Note that the entries in this list appear in no predetermined order,
    1002             :      * because partition result rels are initialized as and when they're
    1003             :      * needed.
    1004             :      */
    1005        8524 :     MemoryContextSwitchTo(estate->es_query_cxt);
    1006        8524 :     estate->es_tuple_routing_result_relations =
    1007        8524 :         lappend(estate->es_tuple_routing_result_relations,
    1008             :                 leaf_part_rri);
    1009             : 
    1010             :     /*
    1011             :      * Initialize information about this partition that's needed to handle
    1012             :      * MERGE.  We take the "first" result relation's mergeActionList as
     1013             :      * reference and make a copy for this relation, converting stuff that
    1014             :      * references attribute numbers to match this relation's.
    1015             :      *
    1016             :      * This duplicates much of the logic in ExecInitMerge(), so if something
    1017             :      * changes there, look here too.
    1018             :      */
    1019        8524 :     if (node && node->operation == CMD_MERGE)
    1020             :     {
    1021          24 :         List       *firstMergeActionList = linitial(node->mergeActionLists);
    1022             :         ListCell   *lc;
    1023          24 :         ExprContext *econtext = mtstate->ps.ps_ExprContext;
    1024             :         Node       *joinCondition;
    1025             : 
    1026          24 :         if (part_attmap == NULL)
    1027             :             part_attmap =
    1028          12 :                 build_attrmap_by_name(RelationGetDescr(partrel),
    1029             :                                       RelationGetDescr(firstResultRel),
    1030             :                                       false);
    1031             : 
    1032          24 :         if (unlikely(!leaf_part_rri->ri_projectNewInfoValid))
    1033          24 :             ExecInitMergeTupleSlots(mtstate, leaf_part_rri);
    1034             : 
    1035             :         /* Initialize state for join condition checking. */
    1036             :         joinCondition =
    1037          24 :             map_variable_attnos(linitial(node->mergeJoinConditions),
    1038             :                                 firstVarno, 0,
    1039             :                                 part_attmap,
    1040          24 :                                 RelationGetForm(partrel)->reltype,
    1041             :                                 &found_whole_row);
    1042             :         /* We ignore the value of found_whole_row. */
    1043          24 :         leaf_part_rri->ri_MergeJoinCondition =
    1044          24 :             ExecInitQual((List *) joinCondition, &mtstate->ps);
    1045             : 
    1046          60 :         foreach(lc, firstMergeActionList)
    1047             :         {
    1048             :             /* Make a copy for this relation to be safe.  */
    1049          36 :             MergeAction *action = copyObject(lfirst(lc));
    1050             :             MergeActionState *action_state;
    1051             : 
    1052             :             /* Generate the action's state for this relation */
    1053          36 :             action_state = makeNode(MergeActionState);
    1054          36 :             action_state->mas_action = action;
    1055             : 
    1056             :             /* And put the action in the appropriate list */
    1057          72 :             leaf_part_rri->ri_MergeActions[action->matchKind] =
    1058          36 :                 lappend(leaf_part_rri->ri_MergeActions[action->matchKind],
    1059             :                         action_state);
    1060             : 
    1061          36 :             switch (action->commandType)
    1062             :             {
    1063          12 :                 case CMD_INSERT:
    1064             : 
    1065             :                     /*
    1066             :                      * ExecCheckPlanOutput() already done on the targetlist
     1067             :                      * ExecCheckPlanOutput() was already done on the targetlist
     1068             :                      * when the "first" result relation was initialized, and it
     1069             :                      * is the same for all result relations.  */
    1070          12 :                     action_state->mas_proj =
    1071          12 :                         ExecBuildProjectionInfo(action->targetList, econtext,
    1072             :                                                 leaf_part_rri->ri_newTupleSlot,
    1073             :                                                 &mtstate->ps,
    1074             :                                                 RelationGetDescr(partrel));
    1075          12 :                     break;
    1076          18 :                 case CMD_UPDATE:
    1077             : 
    1078             :                     /*
    1079             :                      * Convert updateColnos from "first" result relation
    1080             :                      * attribute numbers to this result rel's.
    1081             :                      */
    1082          18 :                     if (part_attmap)
    1083          18 :                         action->updateColnos =
    1084          18 :                             adjust_partition_colnos_using_map(action->updateColnos,
    1085             :                                                               part_attmap);
    1086          18 :                     action_state->mas_proj =
    1087          18 :                         ExecBuildUpdateProjection(action->targetList,
    1088             :                                                   true,
    1089             :                                                   action->updateColnos,
    1090          18 :                                                   RelationGetDescr(leaf_part_rri->ri_RelationDesc),
    1091             :                                                   econtext,
    1092             :                                                   leaf_part_rri->ri_newTupleSlot,
    1093             :                                                   NULL);
    1094          18 :                     break;
    1095           6 :                 case CMD_DELETE:
    1096             :                 case CMD_NOTHING:
    1097             :                     /* Nothing to do */
    1098           6 :                     break;
    1099             : 
    1100           0 :                 default:
    1101           0 :                     elog(ERROR, "unknown action in MERGE WHEN clause");
    1102             :             }
    1103             : 
    1104             :             /* found_whole_row intentionally ignored. */
    1105          36 :             action->qual =
    1106          36 :                 map_variable_attnos(action->qual,
    1107             :                                     firstVarno, 0,
    1108             :                                     part_attmap,
    1109          36 :                                     RelationGetForm(partrel)->reltype,
    1110             :                                     &found_whole_row);
    1111          36 :             action_state->mas_whenqual =
    1112          36 :                 ExecInitQual((List *) action->qual, &mtstate->ps);
    1113             :         }
    1114             :     }
    1115        8524 :     MemoryContextSwitchTo(oldcxt);
    1116             : 
    1117        8524 :     return leaf_part_rri;
    1118             : }
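
The ON CONFLICT DO UPDATE and MERGE branches above both rest on the same step: before any expression copied from the root relation can run against a partition, its attribute numbers must be remapped from the root's rowtype to the partition's (build_attrmap_by_name plus map_variable_attnos, applied once for the EXCLUDED pseudo-relation and once for the target relation in the ON CONFLICT case). Below is a minimal standalone sketch of that name-based remapping; build_attr_map(), the column layouts, and the SET-list attnos are illustrative assumptions, not the executor's AttrMap API.

    #include <stdio.h>
    #include <string.h>

    /*
     * Map parent attnos (1-based) to child attnos by matching column names;
     * map[i] receives the child attno for parent attno i + 1, or 0 if the
     * column has no counterpart (e.g. it was dropped in the child).
     */
    static void
    build_attr_map(const char **parent, int nparent,
                   const char **child, int nchild, int *map)
    {
        for (int i = 0; i < nparent; i++)
        {
            map[i] = 0;
            for (int j = 0; j < nchild; j++)
                if (strcmp(parent[i], child[j]) == 0)
                    map[i] = j + 1;
        }
    }

    int
    main(void)
    {
        const char *root[] = {"a", "b", "c"};
        const char *part[] = {"a", "c", "b"};   /* same columns, different order */
        int         map[3];
        int         setattnos[] = {2, 3};       /* SET b = ..., c = ... in root attnos */

        build_attr_map(root, 3, part, 3, map);
        for (int i = 0; i < 2; i++)
            printf("root attno %d -> partition attno %d\n",
                   setattnos[i], map[setattnos[i] - 1]);
        return 0;
    }
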
    1119             : 
    1120             : /*
    1121             :  * ExecInitRoutingInfo
    1122             :  *      Set up information needed for translating tuples between root
    1123             :  *      partitioned table format and partition format, and keep track of it
    1124             :  *      in PartitionTupleRouting.
    1125             :  */
    1126             : static void
    1127        9032 : ExecInitRoutingInfo(ModifyTableState *mtstate,
    1128             :                     EState *estate,
    1129             :                     PartitionTupleRouting *proute,
    1130             :                     PartitionDispatch dispatch,
    1131             :                     ResultRelInfo *partRelInfo,
    1132             :                     int partidx,
    1133             :                     bool is_borrowed_rel)
    1134             : {
    1135             :     MemoryContext oldcxt;
    1136             :     int         rri_index;
    1137             : 
    1138        9032 :     oldcxt = MemoryContextSwitchTo(proute->memcxt);
    1139             : 
    1140             :     /*
    1141             :      * Set up tuple conversion between root parent and the partition if the
    1142             :      * two have different rowtypes.  If conversion is indeed required, also
    1143             :      * initialize a slot dedicated to storing this partition's converted
    1144             :      * tuples.  Various operations that are applied to tuples after routing,
    1145             :      * such as checking constraints, will refer to this slot.
    1146             :      */
    1147        9032 :     if (ExecGetRootToChildMap(partRelInfo, estate) != NULL)
    1148             :     {
    1149        1318 :         Relation    partrel = partRelInfo->ri_RelationDesc;
    1150             : 
    1151             :         /*
    1152             :          * This pins the partition's TupleDesc, which will be released at the
    1153             :          * end of the command.
    1154             :          */
    1155        1318 :         partRelInfo->ri_PartitionTupleSlot =
    1156        1318 :             table_slot_create(partrel, &estate->es_tupleTable);
    1157             :     }
    1158             :     else
    1159        7714 :         partRelInfo->ri_PartitionTupleSlot = NULL;
    1160             : 
    1161             :     /*
    1162             :      * If the partition is a foreign table, let the FDW init itself for
    1163             :      * routing tuples to the partition.
    1164             :      */
    1165        9032 :     if (partRelInfo->ri_FdwRoutine != NULL &&
    1166          92 :         partRelInfo->ri_FdwRoutine->BeginForeignInsert != NULL)
    1167          92 :         partRelInfo->ri_FdwRoutine->BeginForeignInsert(mtstate, partRelInfo);
    1168             : 
    1169             :     /*
    1170             :      * Determine if the FDW supports batch insert and determine the batch size
    1171             :      * (a FDW may support batching, but it may be disabled for the
    1172             :      * server/table or for this particular query).
    1173             :      *
    1174             :      * If the FDW does not support batching, we set the batch size to 1.
    1175             :      */
    1176        9020 :     if (partRelInfo->ri_FdwRoutine != NULL &&
    1177          80 :         partRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
    1178          80 :         partRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
    1179          80 :         partRelInfo->ri_BatchSize =
    1180          80 :             partRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(partRelInfo);
    1181             :     else
    1182        8940 :         partRelInfo->ri_BatchSize = 1;
    1183             : 
    1184             :     Assert(partRelInfo->ri_BatchSize >= 1);
    1185             : 
    1186        9020 :     partRelInfo->ri_CopyMultiInsertBuffer = NULL;
    1187             : 
    1188             :     /*
    1189             :      * Keep track of it in the PartitionTupleRouting->partitions array.
    1190             :      */
    1191             :     Assert(dispatch->indexes[partidx] == -1);
    1192             : 
    1193        9020 :     rri_index = proute->num_partitions++;
    1194             : 
    1195             :     /* Allocate or enlarge the array, as needed */
    1196        9020 :     if (proute->num_partitions >= proute->max_partitions)
    1197             :     {
    1198        6858 :         if (proute->max_partitions == 0)
    1199             :         {
    1200        6846 :             proute->max_partitions = 8;
    1201        6846 :             proute->partitions = palloc_array(ResultRelInfo *, proute->max_partitions);
    1202        6846 :             proute->is_borrowed_rel = palloc_array(bool, proute->max_partitions);
    1203             :         }
    1204             :         else
    1205             :         {
    1206          12 :             proute->max_partitions *= 2;
    1207          12 :             proute->partitions = (ResultRelInfo **)
    1208          12 :                 repalloc(proute->partitions, sizeof(ResultRelInfo *) *
    1209          12 :                          proute->max_partitions);
    1210          12 :             proute->is_borrowed_rel = (bool *)
    1211          12 :                 repalloc(proute->is_borrowed_rel, sizeof(bool) *
    1212          12 :                          proute->max_partitions);
    1213             :         }
    1214             :     }
    1215             : 
    1216        9020 :     proute->partitions[rri_index] = partRelInfo;
    1217        9020 :     proute->is_borrowed_rel[rri_index] = is_borrowed_rel;
    1218        9020 :     dispatch->indexes[partidx] = rri_index;
    1219             : 
    1220        9020 :     MemoryContextSwitchTo(oldcxt);
    1221        9020 : }
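
A side note on the bookkeeping at the end of ExecInitRoutingInfo: proute->partitions and proute->is_borrowed_rel are allocated lazily, starting at 8 slots and doubling whenever full, so a statement that routes tuples to only a handful of partitions never allocates an entry per partition of the table. Here is a standalone sketch of that growth pattern, with realloc standing in for palloc/repalloc and the Routing struct an illustrative stand-in (error handling omitted):

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Routing
    {
        void      **partitions;     /* grows on demand */
        int         num_partitions;
        int         max_partitions;
    } Routing;

    /* Store 'rel' and return its index, enlarging the array when needed. */
    static int
    routing_add(Routing *r, void *rel)
    {
        int         idx = r->num_partitions++;

        if (r->num_partitions >= r->max_partitions)
        {
            r->max_partitions = (r->max_partitions == 0) ? 8 : r->max_partitions * 2;
            r->partitions = realloc(r->partitions,
                                    sizeof(void *) * r->max_partitions);
        }
        r->partitions[idx] = rel;
        return idx;                 /* recorded in dispatch->indexes[] above */
    }

    int
    main(void)
    {
        Routing     r = {0};
        int         dummy[20];

        for (int i = 0; i < 20; i++)
            routing_add(&r, &dummy[i]);
        printf("%d partitions stored, capacity %d\n",
               r.num_partitions, r.max_partitions);
        free(r.partitions);
        return 0;
    }
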
    1222             : 
    1223             : /*
    1224             :  * ExecInitPartitionDispatchInfo
    1225             :  *      Lock the partitioned table (if not locked already) and initialize
    1226             :  *      PartitionDispatch for a partitioned table and store it in the next
    1227             :  *      available slot in the proute->partition_dispatch_info array.  Also,
    1228             :  *      record the index into this array in the parent_pd->indexes[] array in
    1229             :  *      the partidx element so that we can properly retrieve the newly created
    1230             :  *      PartitionDispatch later.
    1231             :  */
    1232             : static PartitionDispatch
    1233        8360 : ExecInitPartitionDispatchInfo(EState *estate,
    1234             :                               PartitionTupleRouting *proute, Oid partoid,
    1235             :                               PartitionDispatch parent_pd, int partidx,
    1236             :                               ResultRelInfo *rootResultRelInfo)
    1237             : {
    1238             :     Relation    rel;
    1239             :     PartitionDesc partdesc;
    1240             :     PartitionDispatch pd;
    1241             :     int         dispatchidx;
    1242             :     MemoryContext oldcxt;
    1243             : 
    1244             :     /*
     1245             :      * For data modification, it is better that the executor does not include
    1246             :      * partitions being detached, except when running in snapshot-isolation
    1247             :      * mode.  This means that a read-committed transaction immediately gets a
    1248             :      * "no partition for tuple" error when a tuple is inserted into a
    1249             :      * partition that's being detached concurrently, but a transaction in
    1250             :      * repeatable-read mode can still use such a partition.
    1251             :      */
    1252        8360 :     if (estate->es_partition_directory == NULL)
    1253        7136 :         estate->es_partition_directory =
    1254        7136 :             CreatePartitionDirectory(estate->es_query_cxt,
    1255             :                                      !IsolationUsesXactSnapshot());
    1256             : 
    1257        8360 :     oldcxt = MemoryContextSwitchTo(proute->memcxt);
    1258             : 
    1259             :     /*
    1260             :      * Only sub-partitioned tables need to be locked here.  The root
    1261             :      * partitioned table will already have been locked as it's referenced in
    1262             :      * the query's rtable.
    1263             :      */
    1264        8360 :     if (partoid != RelationGetRelid(proute->partition_root))
    1265        1188 :         rel = table_open(partoid, RowExclusiveLock);
    1266             :     else
    1267        7172 :         rel = proute->partition_root;
    1268        8360 :     partdesc = PartitionDirectoryLookup(estate->es_partition_directory, rel);
    1269             : 
    1270        8360 :     pd = (PartitionDispatch) palloc(offsetof(PartitionDispatchData, indexes) +
    1271        8360 :                                     partdesc->nparts * sizeof(int));
    1272        8360 :     pd->reldesc = rel;
    1273        8360 :     pd->key = RelationGetPartitionKey(rel);
    1274        8360 :     pd->keystate = NIL;
    1275        8360 :     pd->partdesc = partdesc;
    1276        8360 :     if (parent_pd != NULL)
    1277             :     {
    1278        1188 :         TupleDesc   tupdesc = RelationGetDescr(rel);
    1279             : 
    1280             :         /*
    1281             :          * For sub-partitioned tables where the column order differs from its
    1282             :          * direct parent partitioned table, we must store a tuple table slot
    1283             :          * initialized with its tuple descriptor and a tuple conversion map to
    1284             :          * convert a tuple from its parent's rowtype to its own.  This is to
    1285             :          * make sure that we are looking at the correct row using the correct
    1286             :          * tuple descriptor when computing its partition key for tuple
    1287             :          * routing.
    1288             :          */
    1289        1188 :         pd->tupmap = build_attrmap_by_name_if_req(RelationGetDescr(parent_pd->reldesc),
    1290             :                                                   tupdesc,
    1291             :                                                   false);
    1292        1188 :         pd->tupslot = pd->tupmap ?
    1293        1188 :             MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual) : NULL;
    1294             :     }
    1295             :     else
    1296             :     {
    1297             :         /* Not required for the root partitioned table */
    1298        7172 :         pd->tupmap = NULL;
    1299        7172 :         pd->tupslot = NULL;
    1300             :     }
    1301             : 
    1302             :     /*
    1303             :      * Initialize with -1 to signify that the corresponding partition's
    1304             :      * ResultRelInfo or PartitionDispatch has not been created yet.
    1305             :      */
    1306        8360 :     memset(pd->indexes, -1, sizeof(int) * partdesc->nparts);
    1307             : 
    1308             :     /* Track in PartitionTupleRouting for later use */
    1309        8360 :     dispatchidx = proute->num_dispatch++;
    1310             : 
    1311             :     /* Allocate or enlarge the array, as needed */
    1312        8360 :     if (proute->num_dispatch >= proute->max_dispatch)
    1313             :     {
    1314        7172 :         if (proute->max_dispatch == 0)
    1315             :         {
    1316        7172 :             proute->max_dispatch = 4;
    1317        7172 :             proute->partition_dispatch_info = palloc_array(PartitionDispatch, proute->max_dispatch);
    1318        7172 :             proute->nonleaf_partitions = palloc_array(ResultRelInfo *, proute->max_dispatch);
    1319             :         }
    1320             :         else
    1321             :         {
    1322           0 :             proute->max_dispatch *= 2;
    1323           0 :             proute->partition_dispatch_info = (PartitionDispatch *)
    1324           0 :                 repalloc(proute->partition_dispatch_info,
    1325           0 :                          sizeof(PartitionDispatch) * proute->max_dispatch);
    1326           0 :             proute->nonleaf_partitions = (ResultRelInfo **)
    1327           0 :                 repalloc(proute->nonleaf_partitions,
    1328           0 :                          sizeof(ResultRelInfo *) * proute->max_dispatch);
    1329             :         }
    1330             :     }
    1331        8360 :     proute->partition_dispatch_info[dispatchidx] = pd;
    1332             : 
    1333             :     /*
    1334             :      * If setting up a PartitionDispatch for a sub-partitioned table, we may
    1335             :      * also need a minimally valid ResultRelInfo for checking the partition
    1336             :      * constraint later; set that up now.
    1337             :      */
    1338        8360 :     if (parent_pd)
    1339             :     {
    1340        1188 :         ResultRelInfo *rri = makeNode(ResultRelInfo);
    1341             : 
    1342        1188 :         InitResultRelInfo(rri, rel, 0, rootResultRelInfo, 0);
    1343        1188 :         proute->nonleaf_partitions[dispatchidx] = rri;
    1344             :     }
    1345             :     else
    1346        7172 :         proute->nonleaf_partitions[dispatchidx] = NULL;
    1347             : 
    1348             :     /*
    1349             :      * Finally, if setting up a PartitionDispatch for a sub-partitioned table,
    1350             :      * install a downlink in the parent to allow quick descent.
    1351             :      */
    1352        8360 :     if (parent_pd)
    1353             :     {
    1354             :         Assert(parent_pd->indexes[partidx] == -1);
    1355        1188 :         parent_pd->indexes[partidx] = dispatchidx;
    1356             :     }
    1357             : 
    1358        8360 :     MemoryContextSwitchTo(oldcxt);
    1359             : 
    1360        8360 :     return pd;
    1361             : }
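
One small point in ExecInitPartitionDispatchInfo worth making explicit is the memset(pd->indexes, -1, ...): writing the byte 0xFF into every position makes each int element read back as -1 on two's-complement machines, which is what lets a single memset mark every child as "nothing created yet" before downlinks are installed one by one. A self-contained check of that assumption (the array size and slot numbers here are arbitrary):

    #include <assert.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        int         indexes[8];

        /* memset writes 0xFF bytes; each int then reads back as -1 */
        memset(indexes, -1, sizeof(indexes));
        for (int i = 0; i < 8; i++)
            assert(indexes[i] == -1);

        /* Installing a "downlink" later overwrites just the one slot. */
        indexes[3] = 0;             /* e.g. offset of a sub-partition's dispatch */
        printf("indexes[3] = %d, indexes[4] = %d\n", indexes[3], indexes[4]);
        return 0;
    }
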
    1362             : 
    1363             : /*
    1364             :  * ExecCleanupTupleRouting -- Clean up objects allocated for partition tuple
    1365             :  * routing.
    1366             :  *
    1367             :  * Close all the partitioned tables, leaf partitions, and their indices.
    1368             :  */
    1369             : void
    1370        6364 : ExecCleanupTupleRouting(ModifyTableState *mtstate,
    1371             :                         PartitionTupleRouting *proute)
    1372             : {
    1373             :     int         i;
    1374             : 
    1375             :     /*
    1376             :      * Remember, proute->partition_dispatch_info[0] corresponds to the root
    1377             :      * partitioned table, which we must not try to close, because it is the
    1378             :      * main target table of the query that will be closed by callers such as
    1379             :      * ExecEndPlan() or DoCopy(). Also, tupslot is NULL for the root
    1380             :      * partitioned table.
    1381             :      */
    1382        7328 :     for (i = 1; i < proute->num_dispatch; i++)
    1383             :     {
    1384         964 :         PartitionDispatch pd = proute->partition_dispatch_info[i];
    1385             : 
    1386         964 :         table_close(pd->reldesc, NoLock);
    1387             : 
    1388         964 :         if (pd->tupslot)
    1389         454 :             ExecDropSingleTupleTableSlot(pd->tupslot);
    1390             :     }
    1391             : 
    1392       14816 :     for (i = 0; i < proute->num_partitions; i++)
    1393             :     {
    1394        8452 :         ResultRelInfo *resultRelInfo = proute->partitions[i];
    1395             : 
    1396             :         /* Allow any FDWs to shut down */
    1397        8452 :         if (resultRelInfo->ri_FdwRoutine != NULL &&
    1398          68 :             resultRelInfo->ri_FdwRoutine->EndForeignInsert != NULL)
    1399          68 :             resultRelInfo->ri_FdwRoutine->EndForeignInsert(mtstate->ps.state,
    1400             :                                                            resultRelInfo);
    1401             : 
    1402             :         /*
    1403             :          * Close it if it's not one of the result relations borrowed from the
    1404             :          * owning ModifyTableState; those will be closed by ExecEndPlan().
    1405             :          */
    1406        8452 :         if (proute->is_borrowed_rel[i])
    1407         460 :             continue;
    1408             : 
    1409        7992 :         ExecCloseIndices(resultRelInfo);
    1410        7992 :         table_close(resultRelInfo->ri_RelationDesc, NoLock);
    1411             :     }
    1412        6364 : }
    1413             : 
    1414             : /* ----------------
    1415             :  *      FormPartitionKeyDatum
    1416             :  *          Construct values[] and isnull[] arrays for the partition key
    1417             :  *          of a tuple.
    1418             :  *
    1419             :  *  pd              Partition dispatch object of the partitioned table
    1420             :  *  slot            Heap tuple from which to extract partition key
    1421             :  *  estate          executor state for evaluating any partition key
    1422             :  *                  expressions (must be non-NULL)
    1423             :  *  values          Array of partition key Datums (output area)
    1424             :  *  isnull          Array of is-null indicators (output area)
    1425             :  *
    1426             :  * the ecxt_scantuple slot of estate's per-tuple expr context must point to
    1427             :  * the heap tuple passed in.
    1428             :  * ----------------
    1429             :  */
    1430             : static void
    1431     1150148 : FormPartitionKeyDatum(PartitionDispatch pd,
    1432             :                       TupleTableSlot *slot,
    1433             :                       EState *estate,
    1434             :                       Datum *values,
    1435             :                       bool *isnull)
    1436             : {
    1437             :     ListCell   *partexpr_item;
    1438             :     int         i;
    1439             : 
    1440     1150148 :     if (pd->key->partexprs != NIL && pd->keystate == NIL)
    1441             :     {
    1442             :         /* Check caller has set up context correctly */
    1443             :         Assert(estate != NULL &&
    1444             :                GetPerTupleExprContext(estate)->ecxt_scantuple == slot);
    1445             : 
    1446             :         /* First time through, set up expression evaluation state */
    1447         534 :         pd->keystate = ExecPrepareExprList(pd->key->partexprs, estate);
    1448             :     }
    1449             : 
    1450     1150148 :     partexpr_item = list_head(pd->keystate);
    1451     2323108 :     for (i = 0; i < pd->key->partnatts; i++)
    1452             :     {
    1453     1172960 :         AttrNumber  keycol = pd->key->partattrs[i];
    1454             :         Datum       datum;
    1455             :         bool        isNull;
    1456             : 
    1457     1172960 :         if (keycol != 0)
    1458             :         {
    1459             :             /* Plain column; get the value directly from the heap tuple */
    1460     1085336 :             datum = slot_getattr(slot, keycol, &isNull);
    1461             :         }
    1462             :         else
    1463             :         {
    1464             :             /* Expression; need to evaluate it */
    1465       87624 :             if (partexpr_item == NULL)
    1466           0 :                 elog(ERROR, "wrong number of partition key expressions");
    1467       87624 :             datum = ExecEvalExprSwitchContext((ExprState *) lfirst(partexpr_item),
    1468       87624 :                                               GetPerTupleExprContext(estate),
    1469             :                                               &isNull);
    1470       87624 :             partexpr_item = lnext(pd->keystate, partexpr_item);
    1471             :         }
    1472     1172960 :         values[i] = datum;
    1473     1172960 :         isnull[i] = isNull;
    1474             :     }
    1475             : 
    1476     1150148 :     if (partexpr_item != NULL)
    1477           0 :         elog(ERROR, "wrong number of partition key expressions");
    1478     1150148 : }
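
FormPartitionKeyDatum leans on the convention that partattrs[i] == 0 marks an expression column: plain columns are fetched straight from the slot, while expression values are consumed, in order, from the separately prepared list, and the "wrong number of partition key expressions" errors fire when the two streams fall out of step. A standalone model of that walk, with doubles standing in for Datums and all values purely illustrative:

    #include <stdio.h>

    int
    main(void)
    {
        /* Partition key (a, (a + b), c): attno 0 marks the expression column */
        int         partattrs[] = {1, 0, 3};
        double      tuple[] = {10.0, 20.0, 7.5};    /* heap columns a, b, c */
        double      exprvals[] = {30.0};            /* precomputed a + b */
        int         nexpr = 1,
                    expr_i = 0;
        double      values[3];

        for (int i = 0; i < 3; i++)
        {
            if (partattrs[i] != 0)
                values[i] = tuple[partattrs[i] - 1];    /* plain column */
            else
            {
                if (expr_i >= nexpr)
                {
                    fprintf(stderr, "wrong number of partition key expressions\n");
                    return 1;
                }
                values[i] = exprvals[expr_i++];         /* expression column */
            }
            printf("key column %d = %g\n", i + 1, values[i]);
        }
        return 0;
    }
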
    1479             : 
    1480             : /*
    1481             :  * The number of times the same partition must be found in a row before we
    1482             :  * switch from a binary search for the given values to just checking if the
    1483             :  * values belong to the last found partition.  This must be above 0.
    1484             :  */
    1485             : #define PARTITION_CACHED_FIND_THRESHOLD         16
    1486             : 
    1487             : /*
    1488             :  * get_partition_for_tuple
    1489             :  *      Finds partition of relation which accepts the partition key specified
    1490             :  *      in values and isnull.
    1491             :  *
    1492             :  * Calling this function can be quite expensive when LIST and RANGE
    1493             :  * partitioned tables have many partitions.  This is due to the binary search
    1494             :  * that's done to find the correct partition.  Many of the use cases for LIST
    1495             :  * and RANGE partitioned tables make it likely that the same partition is
    1496             :  * found in subsequent ExecFindPartition() calls.  This is especially true for
    1497             :  * cases such as RANGE partitioned tables on a TIMESTAMP column where the
    1498             :  * partition key is the current time.  When asked to find a partition for a
    1499             :  * RANGE or LIST partitioned table, we record the partition index and datum
    1500             :  * offset we've found for the given 'values' in the PartitionDesc (which is
    1501             :  * stored in relcache), and if we keep finding the same partition
    1502             :  * PARTITION_CACHED_FIND_THRESHOLD times in a row, then we'll enable caching
    1503             :  * logic and instead of performing a binary search to find the correct
    1504             :  * partition, we'll just double-check that 'values' still belong to the last
    1505             :  * found partition, and if so, we'll return that partition index, thus
    1506             :  * skipping the need for the binary search.  If we fail to match the last
    1507             :  * partition when double checking, then we fall back on doing a binary search.
    1508             :  * In this case, unless we find 'values' belong to the DEFAULT partition,
    1509             :  * we'll reset the number of times we've hit the same partition so that we
    1510             :  * don't attempt to use the cache again until we've found that partition at
    1511             :  * least PARTITION_CACHED_FIND_THRESHOLD times in a row.
    1512             :  *
    1513             :  * For cases where the partition changes on each lookup, the amount of
    1514             :  * additional work required just amounts to recording the last found partition
    1515             :  * and bound offset then resetting the found counter.  This is cheap and does
    1516             :  * not appear to cause any meaningful slowdowns for such cases.
    1517             :  *
    1518             :  * No caching of partitions is done when the last found partition is the
    1519             :  * DEFAULT or NULL partition.  For the case of the DEFAULT partition, there
    1520             :  * is no bound offset storing the matching datum, so we cannot confirm the
    1521             :  * indexes match.  For the NULL partition, this is just so cheap, there's no
    1522             :  * sense in caching.
    1523             :  *
    1524             :  * Return value is index of the partition (>= 0 and < partdesc->nparts) if one
    1525             :  * found or -1 if none found.
    1526             :  */
    1527             : static int
    1528     1150106 : get_partition_for_tuple(PartitionDispatch pd, const Datum *values, const bool *isnull)
    1529             : {
    1530     1150106 :     int         bound_offset = -1;
    1531     1150106 :     int         part_index = -1;
    1532     1150106 :     PartitionKey key = pd->key;
    1533     1150106 :     PartitionDesc partdesc = pd->partdesc;
    1534     1150106 :     PartitionBoundInfo boundinfo = partdesc->boundinfo;
    1535             : 
    1536             :     /*
    1537             :      * In the switch statement below, when we perform a cached lookup for
    1538             :      * RANGE and LIST partitioned tables, if we find that the last found
    1539             :      * partition matches the 'values', we return the partition index right
    1540             :      * away.  We do this instead of breaking out of the switch as we don't
    1541             :      * want to execute the code about the DEFAULT partition or do any updates
    1542             :      * for any of the cache-related fields.  That would be a waste of effort
    1543             :      * as we already know it's not the DEFAULT partition and have no need to
    1544             :      * increment the number of times we found the same partition any higher
    1545             :      * than PARTITION_CACHED_FIND_THRESHOLD.
    1546             :      */
    1547             : 
    1548             :     /* Route as appropriate based on partitioning strategy. */
    1549     1150106 :     switch (key->strategy)
    1550             :     {
    1551      212738 :         case PARTITION_STRATEGY_HASH:
    1552             :             {
    1553             :                 uint64      rowHash;
    1554             : 
    1555             :                 /* hash partitioning is too cheap to bother caching */
    1556      212738 :                 rowHash = compute_partition_hash_value(key->partnatts,
    1557             :                                                        key->partsupfunc,
    1558      212738 :                                                        key->partcollation,
    1559             :                                                        values, isnull);
    1560             : 
    1561             :                 /*
    1562             :                  * HASH partitions can't have a DEFAULT partition and we don't
    1563             :                  * do any caching work for them, so just return the part index
    1564             :                  */
    1565      212726 :                 return boundinfo->indexes[rowHash % boundinfo->nindexes];
    1566             :             }
    1567             : 
    1568      171030 :         case PARTITION_STRATEGY_LIST:
    1569      171030 :             if (isnull[0])
    1570             :             {
    1571             :                 /* this is far too cheap to bother doing any caching */
    1572         132 :                 if (partition_bound_accepts_nulls(boundinfo))
    1573             :                 {
    1574             :                     /*
    1575             :                      * When there is a NULL partition we just return that
    1576             :                      * directly.  We don't have a bound_offset so it's not
    1577             :                      * valid to drop into the code after the switch which
    1578             :                      * checks and updates the cache fields.  We perhaps should
    1579             :                      * be invalidating the details of the last cached
    1580             :                      * partition but there's no real need to.  Keeping those
    1581             :                      * fields set gives a chance at matching to the cached
    1582             :                      * partition on the next lookup.
    1583             :                      */
    1584         102 :                     return boundinfo->null_index;
    1585             :                 }
    1586             :             }
    1587             :             else
    1588             :             {
    1589             :                 bool        equal;
    1590             : 
    1591      170898 :                 if (partdesc->last_found_count >= PARTITION_CACHED_FIND_THRESHOLD)
    1592             :                 {
    1593       23892 :                     int         last_datum_offset = partdesc->last_found_datum_index;
    1594       23892 :                     Datum       lastDatum = boundinfo->datums[last_datum_offset][0];
    1595             :                     int32       cmpval;
    1596             : 
    1597             :                     /* does the last found datum index match this datum? */
    1598       23892 :                     cmpval = DatumGetInt32(FunctionCall2Coll(&key->partsupfunc[0],
    1599       23892 :                                                              key->partcollation[0],
    1600             :                                                              lastDatum,
    1601             :                                                              values[0]));
    1602             : 
    1603       23892 :                     if (cmpval == 0)
    1604       23538 :                         return boundinfo->indexes[last_datum_offset];
    1605             : 
    1606             :                     /* fall-through and do a manual lookup */
    1607             :                 }
    1608             : 
    1609      147360 :                 bound_offset = partition_list_bsearch(key->partsupfunc,
    1610             :                                                       key->partcollation,
    1611             :                                                       boundinfo,
    1612             :                                                       values[0], &equal);
    1613      147360 :                 if (bound_offset >= 0 && equal)
    1614      146960 :                     part_index = boundinfo->indexes[bound_offset];
    1615             :             }
    1616      147390 :             break;
    1617             : 
    1618      766338 :         case PARTITION_STRATEGY_RANGE:
    1619             :             {
    1620      766338 :                 bool        equal = false,
    1621      766338 :                             range_partkey_has_null = false;
    1622             :                 int         i;
    1623             : 
    1624             :                 /*
    1625             :                  * No range includes NULL, so this will be accepted by the
    1626             :                  * default partition if there is one, and otherwise rejected.
    1627             :                  */
    1628     1555068 :                 for (i = 0; i < key->partnatts; i++)
    1629             :                 {
    1630      788784 :                     if (isnull[i])
    1631             :                     {
    1632          54 :                         range_partkey_has_null = true;
    1633          54 :                         break;
    1634             :                     }
    1635             :                 }
    1636             : 
    1637             :                 /* NULLs belong in the DEFAULT partition */
    1638      766338 :                 if (range_partkey_has_null)
    1639          54 :                     break;
    1640             : 
    1641      766284 :                 if (partdesc->last_found_count >= PARTITION_CACHED_FIND_THRESHOLD)
    1642             :                 {
    1643      249654 :                     int         last_datum_offset = partdesc->last_found_datum_index;
    1644      249654 :                     Datum      *lastDatums = boundinfo->datums[last_datum_offset];
    1645      249654 :                     PartitionRangeDatumKind *kind = boundinfo->kind[last_datum_offset];
    1646             :                     int32       cmpval;
    1647             : 
    1648             :                     /* check if the value is >= to the lower bound */
    1649      249654 :                     cmpval = partition_rbound_datum_cmp(key->partsupfunc,
    1650             :                                                         key->partcollation,
    1651             :                                                         lastDatums,
    1652             :                                                         kind,
    1653             :                                                         values,
    1654      249654 :                                                         key->partnatts);
    1655             : 
    1656             :                     /*
    1657             :                      * If it's equal to the lower bound then no need to check
    1658             :                      * the upper bound.
    1659             :                      */
    1660      249654 :                     if (cmpval == 0)
    1661      249344 :                         return boundinfo->indexes[last_datum_offset + 1];
    1662             : 
    1663      243756 :                     if (cmpval < 0 && last_datum_offset + 1 < boundinfo->ndatums)
    1664             :                     {
    1665             :                         /* check if the value is below the upper bound */
    1666      243696 :                         lastDatums = boundinfo->datums[last_datum_offset + 1];
    1667      243696 :                         kind = boundinfo->kind[last_datum_offset + 1];
    1668      243696 :                         cmpval = partition_rbound_datum_cmp(key->partsupfunc,
    1669             :                                                             key->partcollation,
    1670             :                                                             lastDatums,
    1671             :                                                             kind,
    1672             :                                                             values,
    1673      243696 :                                                             key->partnatts);
    1674             : 
    1675      243696 :                         if (cmpval > 0)
    1676      243446 :                             return boundinfo->indexes[last_datum_offset + 1];
    1677             :                     }
    1678             :                     /* fall-through and do a manual lookup */
    1679             :                 }
    1680             : 
    1681      516940 :                 bound_offset = partition_range_datum_bsearch(key->partsupfunc,
    1682             :                                                              key->partcollation,
    1683             :                                                              boundinfo,
    1684      516940 :                                                              key->partnatts,
    1685             :                                                              values,
    1686             :                                                              &equal);
    1687             : 
    1688             :                 /*
    1689             :                  * The bound at bound_offset is less than or equal to the
    1690             :                  * tuple value, so the bound at offset+1 is the upper bound of
    1691             :                  * the partition we're looking for, if there actually exists
    1692             :                  * one.
    1693             :                  */
    1694      516940 :                 part_index = boundinfo->indexes[bound_offset + 1];
    1695             :             }
    1696      516940 :             break;
    1697             : 
    1698           0 :         default:
    1699           0 :             elog(ERROR, "unexpected partition strategy: %d",
    1700             :                  (int) key->strategy);
    1701             :     }
    1702             : 
    1703             :     /*
    1704             :      * part_index < 0 means we failed to find a partition of this parent. Use
    1705             :      * the default partition, if there is one.
    1706             :      */
    1707      664384 :     if (part_index < 0)
    1708             :     {
    1709             :         /*
    1710             :          * No need to reset the cache fields here.  The next set of values
    1711             :          * might end up belonging to the cached partition, so leaving the
    1712             :          * cache alone improves the chances of a cache hit on the next lookup.
    1713             :          */
    1714         708 :         return boundinfo->default_index;
    1715             :     }
    1716             : 
    1717             :     /* we should only make it here when the code above set bound_offset */
    1718             :     Assert(bound_offset >= 0);
    1719             : 
    1720             :     /*
    1721             :      * Attend to the cache fields.  If the bound_offset matches the last
    1722             :      * cached bound offset then we've found the same partition as last time,
    1723             :      * so bump the count by one.  If all goes well, we'll eventually reach
    1724             :      * PARTITION_CACHED_FIND_THRESHOLD and try the cache path next time
    1725             :      * around.  Otherwise, we'll reset the cache count back to 1 to mark that
    1726             :      * we've found this partition for the first time.
    1727             :      */
    1728      663676 :     if (bound_offset == partdesc->last_found_datum_index)
    1729      461214 :         partdesc->last_found_count++;
    1730             :     else
    1731             :     {
    1732      202462 :         partdesc->last_found_count = 1;
    1733      202462 :         partdesc->last_found_part_index = part_index;
    1734      202462 :         partdesc->last_found_datum_index = bound_offset;
    1735             :     }
    1736             : 
    1737      663676 :     return part_index;
    1738             : }
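
Stripped of the PartitionBoundInfo details, the caching scheme documented above amounts to: remember the last matched bound offset, count consecutive hits, and once the count reaches PARTITION_CACHED_FIND_THRESHOLD try a single comparison against the cached bound pair before falling back to the binary search, resetting the counter whenever a different partition wins. The following is a self-contained model of that idea for a RANGE-style lookup over plain int lower bounds; the real bound layout, the DEFAULT and NULL partition handling, and multi-column keys are deliberately omitted.

    #include <stdio.h>

    #define CACHED_FIND_THRESHOLD 16

    static int  last_found_offset = -1;
    static int  last_found_count = 0;

    /* bounds[i] is the inclusive lower bound of partition i; last is unbounded above */
    static int
    find_partition(const int *bounds, int nbounds, int value)
    {
        int         lo = 0,
                    hi = nbounds - 1,
                    offset = -1;

        /* cached path: still within [bounds[last], bounds[last + 1]) ? */
        if (last_found_count >= CACHED_FIND_THRESHOLD &&
            value >= bounds[last_found_offset] &&
            (last_found_offset + 1 >= nbounds ||
             value < bounds[last_found_offset + 1]))
            return last_found_offset;

        /* binary search for the greatest lower bound <= value */
        while (lo <= hi)
        {
            int         mid = (lo + hi) / 2;

            if (bounds[mid] <= value)
            {
                offset = mid;
                lo = mid + 1;
            }
            else
                hi = mid - 1;
        }

        if (offset < 0)
            return -1;          /* below every bound: no partition */

        /* maintain the cache fields, as the real code does after its search */
        if (offset == last_found_offset)
            last_found_count++;
        else
        {
            last_found_count = 1;
            last_found_offset = offset;
        }
        return offset;
    }

    int
    main(void)
    {
        int         bounds[] = {0, 100, 200, 300};

        for (int i = 0; i < 40; i++)
            find_partition(bounds, 4, 250);     /* repeatedly hits partition 2 */
        printf("250 -> partition %d (hit count %d)\n",
               find_partition(bounds, 4, 250), last_found_count);
        printf(" 50 -> partition %d\n", find_partition(bounds, 4, 50));
        return 0;
    }
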
    1739             : 
    1740             : /*
    1741             :  * ExecBuildSlotPartitionKeyDescription
    1742             :  *
    1743             :  * This works very much like BuildIndexValueDescription() and is currently
    1744             :  * used for building error messages when ExecFindPartition() fails to find
    1745             :  * partition for a row.
    1746             :  */
    1747             : static char *
    1748         154 : ExecBuildSlotPartitionKeyDescription(Relation rel,
    1749             :                                      const Datum *values,
    1750             :                                      const bool *isnull,
    1751             :                                      int maxfieldlen)
    1752             : {
    1753             :     StringInfoData buf;
    1754         154 :     PartitionKey key = RelationGetPartitionKey(rel);
    1755         154 :     int         partnatts = get_partition_natts(key);
    1756             :     int         i;
    1757         154 :     Oid         relid = RelationGetRelid(rel);
    1758             :     AclResult   aclresult;
    1759             : 
    1760         154 :     if (check_enable_rls(relid, InvalidOid, true) == RLS_ENABLED)
    1761           0 :         return NULL;
    1762             : 
    1763             :     /* If the user has table-level access, just go build the description. */
    1764         154 :     aclresult = pg_class_aclcheck(relid, GetUserId(), ACL_SELECT);
    1765         154 :     if (aclresult != ACLCHECK_OK)
    1766             :     {
    1767             :         /*
    1768             :          * Step through the columns of the partition key and make sure the
    1769             :          * user has SELECT rights on all of them.
    1770             :          */
    1771          24 :         for (i = 0; i < partnatts; i++)
    1772             :         {
    1773          18 :             AttrNumber  attnum = get_partition_col_attnum(key, i);
    1774             : 
    1775             :             /*
    1776             :              * If this partition key column is an expression, we return no
    1777             :              * detail rather than try to figure out what column(s) the
    1778             :              * expression includes and whether the user has SELECT rights on them.
    1779             :              */
    1780          30 :             if (attnum == InvalidAttrNumber ||
    1781          12 :                 pg_attribute_aclcheck(relid, attnum, GetUserId(),
    1782             :                                       ACL_SELECT) != ACLCHECK_OK)
    1783          12 :                 return NULL;
    1784             :         }
    1785             :     }
    1786             : 
    1787         142 :     initStringInfo(&buf);
    1788         142 :     appendStringInfo(&buf, "(%s) = (",
    1789             :                      pg_get_partkeydef_columns(relid, true));
    1790             : 
    1791         338 :     for (i = 0; i < partnatts; i++)
    1792             :     {
    1793             :         char       *val;
    1794             :         int         vallen;
    1795             : 
    1796         196 :         if (isnull[i])
    1797          30 :             val = "null";
    1798             :         else
    1799             :         {
    1800             :             Oid         foutoid;
    1801             :             bool        typisvarlena;
    1802             : 
    1803         166 :             getTypeOutputInfo(get_partition_col_typid(key, i),
    1804             :                               &foutoid, &typisvarlena);
    1805         166 :             val = OidOutputFunctionCall(foutoid, values[i]);
    1806             :         }
    1807             : 
    1808         196 :         if (i > 0)
    1809          54 :             appendStringInfoString(&buf, ", ");
    1810             : 
    1811             :         /* truncate if needed */
    1812         196 :         vallen = strlen(val);
    1813         196 :         if (vallen <= maxfieldlen)
    1814         196 :             appendBinaryStringInfo(&buf, val, vallen);
    1815             :         else
    1816             :         {
    1817           0 :             vallen = pg_mbcliplen(val, vallen, maxfieldlen);
    1818           0 :             appendBinaryStringInfo(&buf, val, vallen);
    1819           0 :             appendStringInfoString(&buf, "...");
    1820             :         }
    1821             :     }
    1822             : 
    1823         142 :     appendStringInfoChar(&buf, ')');
    1824             : 
    1825         142 :     return buf.data;
    1826             : }
    1827             : 
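For illustration, here is a standalone program that builds the same kind of "(columns) = (values)" string with per-value truncation. It uses fixed buffers and byte-based truncation, so it only sketches the formatting; the real function uses StringInfo, per-type output functions, and pg_mbcliplen() to keep truncation multibyte-safe. All names and values below are made up.

#include <stdio.h>
#include <string.h>

/*
 * Append one value, truncating to maxfieldlen bytes with a "..." marker,
 * roughly mirroring the loop above (single-byte encoding assumed here).
 */
static void
append_value(char *buf, size_t bufsize, const char *val, int maxfieldlen)
{
    size_t  len = strlen(val);

    if ((int) len <= maxfieldlen)
        snprintf(buf + strlen(buf), bufsize - strlen(buf), "%s", val);
    else
        snprintf(buf + strlen(buf), bufsize - strlen(buf), "%.*s...",
                 maxfieldlen, val);
}

int
main(void)
{
    char        buf[256] = "";
    const char *vals[] = {"2024-01-01", "null", "averylongtextvalue"};

    snprintf(buf, sizeof(buf), "(logdate, status, note) = (");
    for (int i = 0; i < 3; i++)
    {
        if (i > 0)
            strcat(buf, ", ");
        append_value(buf, sizeof(buf), vals[i], 10);
    }
    strcat(buf, ")");
    /* prints: (logdate, status, note) = (2024-01-01, null, averylongt...) */
    printf("%s\n", buf);
    return 0;
}
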
    1828             : /*
    1829             :  * adjust_partition_colnos
    1830             :  *      Adjust the list of UPDATE target column numbers to account for
    1831             :  *      attribute differences between the parent and the partition.
    1832             :  *
    1833             :  * Note: mustn't be called if no adjustment is required.
    1834             :  */
    1835             : static List *
    1836          76 : adjust_partition_colnos(List *colnos, ResultRelInfo *leaf_part_rri)
    1837             : {
    1838          76 :     TupleConversionMap *map = ExecGetChildToRootMap(leaf_part_rri);
    1839             : 
    1840             :     Assert(map != NULL);
    1841             : 
    1842          76 :     return adjust_partition_colnos_using_map(colnos, map->attrMap);
    1843             : }
    1844             : 
    1845             : /*
    1846             :  * adjust_partition_colnos_using_map
    1847             :  *      Like adjust_partition_colnos, but uses a caller-supplied map instead
    1848             :  *      of assuming the map is from the "root" result relation.
    1849             :  *
    1850             :  * Note: mustn't be called if no adjustment is required.
    1851             :  */
    1852             : static List *
    1853          94 : adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap)
    1854             : {
    1855          94 :     List       *new_colnos = NIL;
    1856             :     ListCell   *lc;
    1857             : 
    1858             :     Assert(attrMap != NULL);    /* else we shouldn't be here */
    1859             : 
    1860         232 :     foreach(lc, colnos)
    1861             :     {
    1862         138 :         AttrNumber  parentattrno = lfirst_int(lc);
    1863             : 
    1864         138 :         if (parentattrno <= 0 ||
    1865         138 :             parentattrno > attrMap->maplen ||
    1866         138 :             attrMap->attnums[parentattrno - 1] == 0)
    1867           0 :             elog(ERROR, "unexpected attno %d in target column list",
    1868             :                  parentattrno);
    1869         138 :         new_colnos = lappend_int(new_colnos,
    1870         138 :                                  attrMap->attnums[parentattrno - 1]);
    1871             :     }
    1872             : 
    1873          94 :     return new_colnos;
    1874             : }
    1875             : 
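The mapping convention used above (1-based parent attribute numbers, with 0 in the map meaning the column has no counterpart in the child) can be sketched with a plain array. The helper and variable names below are invented for the example and are not PostgreSQL APIs.

#include <stdio.h>
#include <stdlib.h>

/*
 * Map a parent attribute number to a child attribute number using a
 * 1-based map where map[parentattno - 1] holds the child attno and 0
 * means "no such column in the child".
 */
static int
remap_attno(const int *map, int maplen, int parentattno)
{
    if (parentattno <= 0 || parentattno > maplen || map[parentattno - 1] == 0)
    {
        fprintf(stderr, "unexpected attno %d in target column list\n",
                parentattno);
        exit(1);
    }
    return map[parentattno - 1];
}

int
main(void)
{
    /* The child stores the parent's columns 1..3 at positions 2, 3, 1. */
    int     map[] = {2, 3, 1};
    int     targets[] = {1, 3};

    for (int i = 0; i < 2; i++)
        printf("parent attno %d -> child attno %d\n",
               targets[i], remap_attno(map, 3, targets[i]));
    return 0;
}
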
    1876             : /*-------------------------------------------------------------------------
    1877             :  * Run-Time Partition Pruning Support.
    1878             :  *
    1879             :  * The following series of functions exist to support the removal of unneeded
    1880             :  * subplans for queries against partitioned tables.  The supporting functions
    1881             :  * here are designed to work with any plan type which supports an arbitrary
    1882             :  * number of subplans, e.g. Append, MergeAppend.
    1883             :  *
    1884             :  * When pruning involves comparison of a partition key to a constant, it's
    1885             :  * done by the planner.  However, if we have a comparison to a non-constant
    1886             :  * but not volatile expression, that presents an opportunity for run-time
    1887             :  * pruning by the executor, allowing irrelevant partitions to be skipped
    1888             :  * dynamically.
    1889             :  *
    1890             :  * We must distinguish expressions containing PARAM_EXEC Params from
    1891             :  * expressions that don't contain those.  Even though a PARAM_EXEC Param is
    1892             :  * considered to be a stable expression, it can change value from one plan
    1893             :  * node scan to the next during query execution.  Stable comparison
    1894             :  * expressions that don't involve such Params allow partition pruning to be
    1895             :  * done once during executor startup.  Expressions that do involve such Params
    1896             :  * require us to prune separately for each scan of the parent plan node.
    1897             :  *
    1898             :  * Note that pruning away unneeded subplans during executor startup has the
    1899             :  * added benefit of not having to initialize the unneeded subplans at all.
    1900             :  *
    1901             :  *
    1902             :  * Functions:
    1903             :  *
    1904             :  * ExecDoInitialPruning:
    1905             :  *      Perform runtime "initial" pruning, if necessary, to determine the set
    1906             :  *      of child subnodes that need to be initialized during ExecInitNode() for
    1907             :  *      all plan nodes that contain a PartitionPruneInfo.
    1908             :  *
    1909             :  * ExecInitPartitionExecPruning:
    1910             :  *      Updates the PartitionPruneState found at the given part_prune_index in
    1911             :  *      EState.es_part_prune_states for use during "exec" pruning if required.
    1912             :  *      Also returns the set of subplans to initialize that would be stored at
    1913             :  *      part_prune_index in EState.es_part_prune_results by
    1914             :  *      ExecDoInitialPruning().  Maps in PartitionPruneState are updated to
    1915             :  *      account for initial pruning possibly having eliminated some of the
    1916             :  *      subplans.
    1917             :  *
    1918             :  * ExecFindMatchingSubPlans:
    1919             :  *      Returns indexes of matching subplans after evaluating the expressions
    1920             :  *      that are safe to evaluate at a given point.  This function is first
    1921             :  *      called during ExecDoInitialPruning() to find the initially matching
    1922             :  *      subplans based on performing the initial pruning steps and then must be
    1923             :  *      called again each time the value of a Param listed in
    1924             :  *      PartitionPruneState's 'execparamids' changes.
    1925             :  *-------------------------------------------------------------------------
    1926             :  */
    1927             : 
    1928             : 
    1929             : /*
    1930             :  * ExecDoInitialPruning
    1931             :  *      Perform runtime "initial" pruning, if necessary, to determine the set
    1932             :  *      of child subnodes that need to be initialized during ExecInitNode() for
    1933             :  *      plan nodes that support partition pruning.
    1934             :  *
    1935             :  * This function iterates over each PartitionPruneInfo entry in
    1936             :  * estate->es_part_prune_infos. For each entry, it creates a PartitionPruneState
    1937             :  * and adds it to es_part_prune_states.  ExecInitPartitionExecPruning() accesses
    1938             :  * these states through their corresponding indexes in es_part_prune_states and
    1939             :  * assigns each state to the parent node's PlanState, from where it will be used
    1940             :  * for "exec" pruning.
    1941             :  *
    1942             :  * If initial pruning steps exist for a PartitionPruneInfo entry, this function
    1943             :  * executes those pruning steps and stores the result as a bitmapset of valid
    1944             :  * child subplans, identifying which subplans should be initialized for
    1945             :  * execution.  The results are saved in estate->es_part_prune_results.
    1946             :  *
    1947             :  * If no initial pruning is performed for a given PartitionPruneInfo, a NULL
    1948             :  * entry is still added to es_part_prune_results to maintain alignment with
    1949             :  * es_part_prune_infos. This ensures that ExecInitPartitionExecPruning() can
    1950             :  * use the same index to retrieve the pruning results.
    1951             :  */
    1952             : void
    1953      587844 : ExecDoInitialPruning(EState *estate)
    1954             : {
    1955             :     ListCell   *lc;
    1956             : 
    1957      588646 :     foreach(lc, estate->es_part_prune_infos)
    1958             :     {
    1959         802 :         PartitionPruneInfo *pruneinfo = lfirst_node(PartitionPruneInfo, lc);
    1960             :         PartitionPruneState *prunestate;
    1961         802 :         Bitmapset  *validsubplans = NULL;
    1962         802 :         Bitmapset  *all_leafpart_rtis = NULL;
    1963         802 :         Bitmapset  *validsubplan_rtis = NULL;
    1964             : 
    1965             :         /* Create and save the PartitionPruneState. */
    1966         802 :         prunestate = CreatePartitionPruneState(estate, pruneinfo,
    1967             :                                                &all_leafpart_rtis);
    1968         802 :         estate->es_part_prune_states = lappend(estate->es_part_prune_states,
    1969             :                                                prunestate);
    1970             : 
    1971             :         /*
    1972             :          * Perform initial pruning steps, if any, and save the result
    1973             :          * bitmapset or NULL as described in the header comment.
    1974             :          */
    1975         802 :         if (prunestate->do_initial_prune)
    1976         448 :             validsubplans = ExecFindMatchingSubPlans(prunestate, true,
    1977             :                                                      &validsubplan_rtis);
    1978             :         else
    1979         354 :             validsubplan_rtis = all_leafpart_rtis;
    1980             : 
    1981         802 :         estate->es_unpruned_relids = bms_add_members(estate->es_unpruned_relids,
    1982             :                                                      validsubplan_rtis);
    1983         802 :         estate->es_part_prune_results = lappend(estate->es_part_prune_results,
    1984             :                                                 validsubplans);
    1985             :     }
    1986      587844 : }
    1987             : 
    1988             : /*
    1989             :  * ExecInitPartitionExecPruning
    1990             :  *      Initialize the data structures needed for runtime "exec" partition
    1991             :  *      pruning and return the result of initial pruning, if available.
    1992             :  *
    1993             :  * 'relids' identifies the relation to which both the parent plan and the
    1994             :  * PartitionPruneInfo given by 'part_prune_index' belong.
    1995             :  *
    1996             :  * On return, *initially_valid_subplans is assigned the set of indexes of
    1997             :  * child subplans that must be initialized along with the parent plan node.
    1998             :  * Initial pruning would have been performed by ExecDoInitialPruning(), if
    1999             :  * necessary, and the bitmapset of surviving subplans' indexes would have
    2000             :  * been stored as the part_prune_index'th element of
    2001             :  * EState.es_part_prune_results.
    2002             :  *
    2003             :  * If subplans were indeed pruned during initial pruning, the subplan_map
    2004             :  * arrays in the returned PartitionPruneState are re-sequenced to exclude those
    2005             :  * subplans, but only if the maps will be needed for subsequent execution
    2006             :  * pruning passes.
    2007             :  */
    2008             : PartitionPruneState *
    2009         806 : ExecInitPartitionExecPruning(PlanState *planstate,
    2010             :                              int n_total_subplans,
    2011             :                              int part_prune_index,
    2012             :                              Bitmapset *relids,
    2013             :                              Bitmapset **initially_valid_subplans)
    2014             : {
    2015             :     PartitionPruneState *prunestate;
    2016         806 :     EState     *estate = planstate->state;
    2017             :     PartitionPruneInfo *pruneinfo;
    2018             : 
    2019             :     /* Obtain the pruneinfo we need. */
    2020         806 :     pruneinfo = list_nth_node(PartitionPruneInfo, estate->es_part_prune_infos,
    2021             :                               part_prune_index);
    2022             : 
    2023             :     /* Its relids had better match the plan node's, or the planner messed up. */
    2024         806 :     if (!bms_equal(relids, pruneinfo->relids))
    2025           0 :         elog(ERROR, "wrong pruneinfo with relids=%s found at part_prune_index=%d contained in plan node with relids=%s",
    2026             :              bmsToString(pruneinfo->relids), part_prune_index,
    2027             :              bmsToString(relids));
    2028             : 
    2029             :     /*
    2030             :      * The PartitionPruneState would have been created by
    2031             :      * ExecDoInitialPruning() and stored as the part_prune_index'th element of
    2032             :      * EState.es_part_prune_states.
    2033             :      */
    2034         806 :     prunestate = list_nth(estate->es_part_prune_states, part_prune_index);
    2035             :     Assert(prunestate != NULL);
    2036             : 
    2037             :     /* Use the result of initial pruning done by ExecDoInitialPruning(). */
    2038         806 :     if (prunestate->do_initial_prune)
    2039         450 :         *initially_valid_subplans = list_nth_node(Bitmapset,
    2040             :                                                   estate->es_part_prune_results,
    2041             :                                                   part_prune_index);
    2042             :     else
    2043             :     {
    2044             :         /* No pruning, so we'll need to initialize all subplans */
    2045             :         Assert(n_total_subplans > 0);
    2046         356 :         *initially_valid_subplans = bms_add_range(NULL, 0,
    2047             :                                                   n_total_subplans - 1);
    2048             :     }
    2049             : 
    2050             :     /*
    2051             :      * The exec pruning state must also be initialized, if needed, before it
    2052             :      * can be used for pruning during execution.
    2053             :      *
    2054             :      * This also re-sequences subplan indexes contained in prunestate to
    2055             :      * account for any that were removed due to initial pruning; refer to the
    2056             :      * condition in InitExecPartitionPruneContexts() that is used to determine
    2057             :      * whether to do this.  If no exec pruning needs to be done, we would thus
    2058             :      * leave the maps in an invalid state, but that's ok since that data
    2059             :      * won't be consulted again (cf initial Assert in
    2060             :      * ExecFindMatchingSubPlans).
    2061             :      */
    2062         806 :     if (prunestate->do_exec_prune)
    2063         398 :         InitExecPartitionPruneContexts(prunestate, planstate,
    2064             :                                        *initially_valid_subplans,
    2065             :                                        n_total_subplans);
    2066             : 
    2067         806 :     return prunestate;
    2068             : }
    2069             : 
    2070             : /*
    2071             :  * CreatePartitionPruneState
    2072             :  *      Build the data structure required for calling ExecFindMatchingSubPlans
    2073             :  *
    2074             :  * This includes PartitionPruneContexts (stored in each
    2075             :  * PartitionedRelPruningData corresponding to a PartitionedRelPruneInfo),
    2076             :  * which hold the ExprStates needed to evaluate pruning expressions, and
    2077             :  * mapping arrays to convert partition indexes from the pruning logic
    2078             :  * into subplan indexes in the parent plan node's list of child subplans.
    2079             :  *
    2080             :  * 'pruneinfo' is a PartitionPruneInfo as generated by
    2081             :  * make_partition_pruneinfo.  Here we build a PartitionPruneState containing a
    2082             :  * PartitionPruningData for each partitioning hierarchy (i.e., each sublist of
    2083             :  * pruneinfo->prune_infos), each of which contains a PartitionedRelPruningData
    2084             :  * for each PartitionedRelPruneInfo appearing in that sublist.  This two-level
    2085             :  * system is needed to keep from confusing the different hierarchies when a
    2086             :  * UNION ALL contains multiple partitioned tables as children.  The data
    2087             :  * stored in each PartitionedRelPruningData can be re-used each time we
    2088             :  * re-evaluate which partitions match the pruning steps provided in each
    2089             :  * PartitionedRelPruneInfo.
    2090             :  *
    2091             :  * Note that only the PartitionPruneContexts for initial pruning are
    2092             :  * initialized here. Those required for exec pruning are initialized later in
    2093             :  * ExecInitPartitionExecPruning(), as they depend on the availability of the
    2094             :  * parent plan node's PlanState.
    2095             :  *
    2096             :  * If initial pruning steps are to be skipped (e.g., during EXPLAIN
    2097             :  * (GENERIC_PLAN)), *all_leafpart_rtis will be populated with the RT indexes of
    2098             :  * all leaf partitions whose scanning subnode is included in the parent plan
    2099             :  * node's list of child plans. The caller must add these RT indexes to
    2100             :  * estate->es_unpruned_relids.
    2101             :  */
    2102             : static PartitionPruneState *
    2103         802 : CreatePartitionPruneState(EState *estate, PartitionPruneInfo *pruneinfo,
    2104             :                           Bitmapset **all_leafpart_rtis)
    2105             : {
    2106             :     PartitionPruneState *prunestate;
    2107             :     int         n_part_hierarchies;
    2108             :     ListCell   *lc;
    2109             :     int         i;
    2110             : 
    2111             :     /*
    2112             :      * Expression context that will be used by partkey_datum_from_expr() to
    2113             :      * evaluate expressions for comparison against partition bounds.
    2114             :      */
    2115         802 :     ExprContext *econtext = CreateExprContext(estate);
    2116             : 
    2117             :     /* For data reading, the executor always includes detached partitions */
    2118         802 :     if (estate->es_partition_directory == NULL)
    2119         754 :         estate->es_partition_directory =
    2120         754 :             CreatePartitionDirectory(estate->es_query_cxt, false);
    2121             : 
    2122         802 :     n_part_hierarchies = list_length(pruneinfo->prune_infos);
    2123             :     Assert(n_part_hierarchies > 0);
    2124             : 
    2125             :     /*
    2126             :      * Allocate the data structure
    2127             :      */
    2128             :     prunestate = (PartitionPruneState *)
    2129         802 :         palloc(offsetof(PartitionPruneState, partprunedata) +
    2130             :                sizeof(PartitionPruningData *) * n_part_hierarchies);
    2131             : 
    2132             :     /* Save ExprContext for use during InitExecPartitionPruneContexts(). */
    2133         802 :     prunestate->econtext = econtext;
    2134         802 :     prunestate->execparamids = NULL;
    2135             :     /* other_subplans can change at runtime, so we need our own copy */
    2136         802 :     prunestate->other_subplans = bms_copy(pruneinfo->other_subplans);
    2137         802 :     prunestate->do_initial_prune = false;    /* may be set below */
    2138         802 :     prunestate->do_exec_prune = false;   /* may be set below */
    2139         802 :     prunestate->num_partprunedata = n_part_hierarchies;
    2140             : 
    2141             :     /*
    2142             :      * Create a short-term memory context which we'll use when making calls to
    2143             :      * the partition pruning functions.  This avoids possible memory leaks,
    2144             :      * since the pruning functions call comparison functions that aren't under
    2145             :      * our control.
    2146             :      */
    2147         802 :     prunestate->prune_context =
    2148         802 :         AllocSetContextCreate(CurrentMemoryContext,
    2149             :                               "Partition Prune",
    2150             :                               ALLOCSET_DEFAULT_SIZES);
    2151             : 
    2152         802 :     i = 0;
    2153        1628 :     foreach(lc, pruneinfo->prune_infos)
    2154             :     {
    2155         826 :         List       *partrelpruneinfos = lfirst_node(List, lc);
    2156         826 :         int         npartrelpruneinfos = list_length(partrelpruneinfos);
    2157             :         PartitionPruningData *prunedata;
    2158             :         ListCell   *lc2;
    2159             :         int         j;
    2160             : 
    2161             :         prunedata = (PartitionPruningData *)
    2162         826 :             palloc(offsetof(PartitionPruningData, partrelprunedata) +
    2163         826 :                    npartrelpruneinfos * sizeof(PartitionedRelPruningData));
    2164         826 :         prunestate->partprunedata[i] = prunedata;
    2165         826 :         prunedata->num_partrelprunedata = npartrelpruneinfos;
    2166             : 
    2167         826 :         j = 0;
    2168        2462 :         foreach(lc2, partrelpruneinfos)
    2169             :         {
    2170        1636 :             PartitionedRelPruneInfo *pinfo = lfirst_node(PartitionedRelPruneInfo, lc2);
    2171        1636 :             PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j];
    2172             :             Relation    partrel;
    2173             :             PartitionDesc partdesc;
    2174             :             PartitionKey partkey;
    2175             : 
    2176             :             /*
    2177             :              * We can rely on the copies of the partitioned table's partition
    2178             :              * key and partition descriptor appearing in its relcache entry,
    2179             :              * because that entry will be held open and locked for the
    2180             :              * duration of this executor run.
    2181             :              */
    2182        1636 :             partrel = ExecGetRangeTableRelation(estate, pinfo->rtindex, false);
    2183             : 
    2184             :             /* Remember for InitExecPartitionPruneContexts(). */
    2185        1636 :             pprune->partrel = partrel;
    2186             : 
    2187        1636 :             partkey = RelationGetPartitionKey(partrel);
    2188        1636 :             partdesc = PartitionDirectoryLookup(estate->es_partition_directory,
    2189             :                                                 partrel);
    2190             : 
    2191             :             /*
    2192             :              * Initialize the subplan_map and subpart_map.
    2193             :              *
    2194             :              * The set of partitions that exist now might not be the same that
    2195             :              * existed when the plan was made.  The normal case is that it is;
    2196             :              * optimize for that case with a quick comparison, and just copy
    2197             :              * the subplan_map and make subpart_map and leafpart_rti_map point to
    2198             :              * the ones in PruneInfo.
    2199             :              *
    2200             :              * For the case where they aren't identical, we could have more
    2201             :              * partitions on either side; or even exactly the same number of
    2202             :              * them on both but the set of OIDs doesn't match fully.  Handle
    2203             :              * this by creating new subplan_map and subpart_map arrays that
    2204             :              * correspond to the ones in the PruneInfo where the new
    2205             :              * partition descriptor's OIDs match.  Any that don't match can be
    2206             :              * set to -1, as if they were pruned.  By construction, both
    2207             :              * arrays are in partition bounds order.
    2208             :              */
    2209        1636 :             pprune->nparts = partdesc->nparts;
    2210        1636 :             pprune->subplan_map = palloc_array(int, partdesc->nparts);
    2211             : 
    2212        1636 :             if (partdesc->nparts == pinfo->nparts &&
    2213        1634 :                 memcmp(partdesc->oids, pinfo->relid_map,
    2214        1634 :                        sizeof(int) * partdesc->nparts) == 0)
    2215             :             {
    2216        1512 :                 pprune->subpart_map = pinfo->subpart_map;
    2217        1512 :                 pprune->leafpart_rti_map = pinfo->leafpart_rti_map;
    2218        1512 :                 memcpy(pprune->subplan_map, pinfo->subplan_map,
    2219        1512 :                        sizeof(int) * pinfo->nparts);
    2220             :             }
    2221             :             else
    2222             :             {
    2223         124 :                 int         pd_idx = 0;
    2224             :                 int         pp_idx;
    2225             : 
    2226             :                 /*
    2227             :                  * When the partition arrays are not identical, there could be
    2228             :                  * some new ones but it's also possible that one was removed;
    2229             :                  * we cope with both situations by walking the arrays and
    2230             :                  * discarding those that don't match.
    2231             :                  *
    2232             :                  * If the number of partitions on both sides matches, it's still
    2233             :                  * possible that one partition has been detached and another
    2234             :                  * attached.  Cope with that by creating a map that skips any
    2235             :                  * mismatches.
    2236             :                  */
    2237         124 :                 pprune->subpart_map = palloc_array(int, partdesc->nparts);
    2238         124 :                 pprune->leafpart_rti_map = palloc_array(int, partdesc->nparts);
    2239             : 
    2240         528 :                 for (pp_idx = 0; pp_idx < partdesc->nparts; pp_idx++)
    2241             :                 {
    2242             :                     /* Skip any InvalidOid relid_map entries */
    2243         624 :                     while (pd_idx < pinfo->nparts &&
    2244         504 :                            !OidIsValid(pinfo->relid_map[pd_idx]))
    2245         220 :                         pd_idx++;
    2246             : 
    2247         404 :             recheck:
    2248         404 :                     if (pd_idx < pinfo->nparts &&
    2249         284 :                         pinfo->relid_map[pd_idx] == partdesc->oids[pp_idx])
    2250             :                     {
    2251             :                         /* match... */
    2252         182 :                         pprune->subplan_map[pp_idx] =
    2253         182 :                             pinfo->subplan_map[pd_idx];
    2254         182 :                         pprune->subpart_map[pp_idx] =
    2255         182 :                             pinfo->subpart_map[pd_idx];
    2256         182 :                         pprune->leafpart_rti_map[pp_idx] =
    2257         182 :                             pinfo->leafpart_rti_map[pd_idx];
    2258         182 :                         pd_idx++;
    2259         182 :                         continue;
    2260             :                     }
    2261             : 
    2262             :                     /*
    2263             :                      * There isn't an exact match in the corresponding
    2264             :                      * positions of both arrays.  Peek ahead in
    2265             :                      * pinfo->relid_map to see if we have a match for the
    2266             :                      * current partition in partdesc.  Normally if a match
    2267             :                      * exists it's just one element ahead, and it means the
    2268             :                      * planner saw one extra partition that we no longer see
    2269             :                      * now (its concurrent detach finished just in between);
    2270             :                      * so we skip that one by updating pd_idx to the new
    2271             :                      * location and jumping above.  We can then continue to
    2272             :                      * match the rest of the elements after skipping the OID
    2273             :                      * with no match; no future matches are tried for the
    2274             :                      * element that was skipped, because we know the arrays to
    2275             :                      * be in the same order.
    2276             :                      *
    2277             :                      * If we don't see a match anywhere in the rest of the
    2278             :                      * pinfo->relid_map array, that means we see an element
    2279             :                      * now that the planner didn't see, so mark that one as
    2280             :                      * pruned and move on.
    2281             :                      */
    2282         288 :                     for (int pd_idx2 = pd_idx + 1; pd_idx2 < pinfo->nparts; pd_idx2++)
    2283             :                     {
    2284          66 :                         if (pd_idx2 >= pinfo->nparts)
    2285           0 :                             break;
    2286          66 :                         if (pinfo->relid_map[pd_idx2] == partdesc->oids[pp_idx])
    2287             :                         {
    2288           0 :                             pd_idx = pd_idx2;
    2289           0 :                             goto recheck;
    2290             :                         }
    2291             :                     }
    2292             : 
    2293         222 :                     pprune->subpart_map[pp_idx] = -1;
    2294         222 :                     pprune->subplan_map[pp_idx] = -1;
    2295         222 :                     pprune->leafpart_rti_map[pp_idx] = 0;
    2296             :                 }
    2297             :             }
    2298             : 
    2299             :             /* present_parts is also subject to later modification */
    2300        1636 :             pprune->present_parts = bms_copy(pinfo->present_parts);
    2301             : 
    2302             :             /*
    2303             :              * Only initial_context is initialized here.  exec_context is
    2304             :              * initialized during ExecInitPartitionExecPruning() when the
    2305             :              * parent plan's PlanState is available.
    2306             :              *
    2307             :              * Note that we must skip execution-time (both "init" and "exec")
    2308             :              * partition pruning in EXPLAIN (GENERIC_PLAN), since parameter
    2309             :              * values may be missing.
    2310             :              */
    2311        1636 :             pprune->initial_pruning_steps = pinfo->initial_pruning_steps;
    2312        1636 :             if (pinfo->initial_pruning_steps &&
    2313         556 :                 !(econtext->ecxt_estate->es_top_eflags & EXEC_FLAG_EXPLAIN_GENERIC))
    2314             :             {
    2315         550 :                 InitPartitionPruneContext(&pprune->initial_context,
    2316             :                                           pprune->initial_pruning_steps,
    2317             :                                           partdesc, partkey, NULL,
    2318             :                                           econtext);
    2319             :                 /* Record whether initial pruning is needed at any level */
    2320         550 :                 prunestate->do_initial_prune = true;
    2321             :             }
    2322        1636 :             pprune->exec_pruning_steps = pinfo->exec_pruning_steps;
    2323        1636 :             if (pinfo->exec_pruning_steps &&
    2324         510 :                 !(econtext->ecxt_estate->es_top_eflags & EXEC_FLAG_EXPLAIN_GENERIC))
    2325             :             {
    2326             :                 /* Record whether exec pruning is needed at any level */
    2327         510 :                 prunestate->do_exec_prune = true;
    2328             :             }
    2329             : 
    2330             :             /*
    2331             :              * Accumulate the IDs of all PARAM_EXEC Params affecting the
    2332             :              * partitioning decisions at this plan node.
    2333             :              */
    2334        3272 :             prunestate->execparamids = bms_add_members(prunestate->execparamids,
    2335        1636 :                                                        pinfo->execparamids);
    2336             : 
    2337             :             /*
    2338             :              * Return all leaf partition indexes if we're skipping pruning in
    2339             :              * the EXPLAIN (GENERIC_PLAN) case.
    2340             :              */
    2341        1636 :             if (pinfo->initial_pruning_steps && !prunestate->do_initial_prune)
    2342             :             {
    2343           6 :                 int         part_index = -1;
    2344             : 
    2345          18 :                 while ((part_index = bms_next_member(pprune->present_parts,
    2346          18 :                                                      part_index)) >= 0)
    2347             :                 {
    2348          12 :                     Index       rtindex = pprune->leafpart_rti_map[part_index];
    2349             : 
    2350          12 :                     if (rtindex)
    2351          12 :                         *all_leafpart_rtis = bms_add_member(*all_leafpart_rtis,
    2352             :                                                             rtindex);
    2353             :                 }
    2354             :             }
    2355             : 
    2356        1636 :             j++;
    2357             :         }
    2358         826 :         i++;
    2359             :     }
    2360             : 
    2361         802 :     return prunestate;
    2362             : }
    2363             : 
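The subplan_map reconciliation above, matching the partition OIDs the executor sees now against those the planner saw (both in partition bound order), reduces to a forward scan that reuses the planner's subplan index on a match and marks planner-unknown partitions as pruned. The standalone sketch below demonstrates only that matching strategy; it ignores InvalidOid placeholders and the subpart_map/leafpart_rti_map handling, and every name in it is hypothetical.

#include <stdio.h>

/*
 * Match current partition OIDs (cur_oids) against the planner's view
 * (plan_oids), both in bound order.  On a match, reuse the planner's
 * subplan index; partitions the planner never saw get -1, as if pruned.
 */
static void
reconcile_maps(const unsigned *cur_oids, int ncur,
               const unsigned *plan_oids, const int *plan_subplan, int nplan,
               int *out_subplan)
{
    int     p = 0;              /* next unconsumed planner entry */

    for (int c = 0; c < ncur; c++)
    {
        int     match = -1;

        /* scan forward; earlier planner entries cannot match later OIDs */
        for (int p2 = p; p2 < nplan; p2++)
        {
            if (plan_oids[p2] == cur_oids[c])
            {
                match = p2;
                break;
            }
        }

        if (match >= 0)
        {
            out_subplan[c] = plan_subplan[match];
            p = match + 1;
        }
        else
            out_subplan[c] = -1;    /* partition the planner never saw */
    }
}

int
main(void)
{
    /* Planner saw 10, 20, 30; 20 was then detached and 25 attached. */
    unsigned    plan_oids[] = {10, 20, 30};
    int         plan_subplan[] = {0, 1, 2};
    unsigned    cur_oids[] = {10, 25, 30};
    int         out[3];

    reconcile_maps(cur_oids, 3, plan_oids, plan_subplan, 3, out);
    for (int c = 0; c < 3; c++)
        printf("partition %u -> subplan %d\n", cur_oids[c], out[c]);
    /* prints: 10 -> 0, 25 -> -1, 30 -> 2 */
    return 0;
}
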
    2364             : /*
    2365             :  * Initialize a PartitionPruneContext for the given list of pruning steps.
    2366             :  */
    2367             : static void
    2368        1062 : InitPartitionPruneContext(PartitionPruneContext *context,
    2369             :                           List *pruning_steps,
    2370             :                           PartitionDesc partdesc,
    2371             :                           PartitionKey partkey,
    2372             :                           PlanState *planstate,
    2373             :                           ExprContext *econtext)
    2374             : {
    2375             :     int         n_steps;
    2376             :     int         partnatts;
    2377             :     ListCell   *lc;
    2378             : 
    2379        1062 :     n_steps = list_length(pruning_steps);
    2380             : 
    2381        1062 :     context->strategy = partkey->strategy;
    2382        1062 :     context->partnatts = partnatts = partkey->partnatts;
    2383        1062 :     context->nparts = partdesc->nparts;
    2384        1062 :     context->boundinfo = partdesc->boundinfo;
    2385        1062 :     context->partcollation = partkey->partcollation;
    2386        1062 :     context->partsupfunc = partkey->partsupfunc;
    2387             : 
    2388             :     /* We'll look up type-specific support functions as needed */
    2389        1062 :     context->stepcmpfuncs = palloc0_array(FmgrInfo, n_steps * partnatts);
    2390             : 
    2391        1062 :     context->ppccontext = CurrentMemoryContext;
    2392        1062 :     context->planstate = planstate;
    2393        1062 :     context->exprcontext = econtext;
    2394             : 
    2395             :     /* Initialize expression state for each expression we need */
    2396        1062 :     context->exprstates = palloc0_array(ExprState *, n_steps * partnatts);
    2397        2786 :     foreach(lc, pruning_steps)
    2398             :     {
    2399        1724 :         PartitionPruneStepOp *step = (PartitionPruneStepOp *) lfirst(lc);
    2400        1724 :         ListCell   *lc2 = list_head(step->exprs);
    2401             :         int         keyno;
    2402             : 
    2403             :         /* not needed for other step kinds */
    2404        1724 :         if (!IsA(step, PartitionPruneStepOp))
    2405         286 :             continue;
    2406             : 
    2407             :         Assert(list_length(step->exprs) <= partnatts);
    2408             : 
    2409        3026 :         for (keyno = 0; keyno < partnatts; keyno++)
    2410             :         {
    2411        1588 :             if (bms_is_member(keyno, step->nullkeys))
    2412           6 :                 continue;
    2413             : 
    2414        1582 :             if (lc2 != NULL)
    2415             :             {
    2416        1486 :                 Expr       *expr = lfirst(lc2);
    2417             : 
    2418             :                 /* not needed for Consts */
    2419        1486 :                 if (!IsA(expr, Const))
    2420             :                 {
    2421        1392 :                     int         stateidx = PruneCxtStateIdx(partnatts,
    2422             :                                                             step->step.step_id,
    2423             :                                                             keyno);
    2424             : 
    2425             :                     /*
    2426             :                      * When planstate is NULL, pruning_steps is known not to
    2427             :                      * contain any expressions that depend on the parent plan.
    2428             :                      * Information about any available EXTERN parameters must be
    2429             :                      * passed explicitly in that case, which the caller must
    2430             :                      * have made available via econtext.
    2431             :                      */
    2432        1392 :                     if (planstate == NULL)
    2433         814 :                         context->exprstates[stateidx] =
    2434         814 :                             ExecInitExprWithParams(expr,
    2435             :                                                    econtext->ecxt_param_list_info);
    2436             :                     else
    2437         578 :                         context->exprstates[stateidx] =
    2438         578 :                             ExecInitExpr(expr, context->planstate);
    2439             :                 }
    2440        1486 :                 lc2 = lnext(step->exprs, lc2);
    2441             :             }
    2442             :         }
    2443             :     }
    2444        1062 : }
    2445             : 
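The exprstates array allocated above is a flattened two-dimensional array with one slot per (pruning step, partition key column) pair; the palloc0_array(ExprState *, n_steps * partnatts) call sizes it accordingly. The sketch below shows the row-major indexing this implies; the helper name is made up, standing in for the PruneCxtStateIdx() macro.

#include <stdio.h>

/* Row-major slot index for a (step_id, keyno) pair. */
static int
state_index(int partnatts, int step_id, int keyno)
{
    return step_id * partnatts + keyno;
}

int
main(void)
{
    int     partnatts = 2;
    int     n_steps = 3;

    for (int step = 0; step < n_steps; step++)
        for (int key = 0; key < partnatts; key++)
            printf("step %d, key %d -> slot %d of %d\n",
                   step, key, state_index(partnatts, step, key),
                   n_steps * partnatts);
    return 0;
}
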
    2446             : /*
    2447             :  * InitExecPartitionPruneContexts
    2448             :  *      Initialize exec pruning contexts deferred by CreatePartitionPruneState()
    2449             :  *
    2450             :  * This function finalizes exec pruning setup for a PartitionPruneState by
    2451             :  * initializing contexts for pruning steps that require the parent plan's
    2452             :  * PlanState. It iterates over PartitionPruningData entries and sets up the
    2453             :  * necessary execution contexts for pruning during query execution.
    2454             :  *
    2455             :  * Also fix the mapping of partition indexes to subplan indexes contained in
    2456             :  * prunestate by considering the new list of subplans that survived initial
    2457             :  * pruning.
    2458             :  *
    2459             :  * Current values of the indexes present in PartitionPruneState count all the
    2460             :  * subplans that would be present before initial pruning was done.  If initial
    2461             :  * pruning got rid of some of the subplans, any subsequent pruning passes will
    2462             :  * be looking at a different set of target subplans to choose from than those
    2463             :  * in the pre-initial-pruning set, so the maps in PartitionPruneState
    2464             :  * containing those indexes must be updated to reflect the new indexes of
    2465             :  * subplans in the post-initial-pruning set.
    2466             :  */
    2467             : static void
    2468         398 : InitExecPartitionPruneContexts(PartitionPruneState *prunestate,
    2469             :                                PlanState *parent_plan,
    2470             :                                Bitmapset *initially_valid_subplans,
    2471             :                                int n_total_subplans)
    2472             : {
    2473             :     EState     *estate;
    2474         398 :     int        *new_subplan_indexes = NULL;
    2475             :     Bitmapset  *new_other_subplans;
    2476             :     int         i;
    2477             :     int         newidx;
    2478         398 :     bool        fix_subplan_map = false;
    2479             : 
    2480             :     Assert(prunestate->do_exec_prune);
    2481             :     Assert(parent_plan != NULL);
    2482         398 :     estate = parent_plan->state;
    2483             : 
    2484             :     /*
    2485             :      * No need to fix subplan maps if initial pruning didn't eliminate any
    2486             :      * subplans.
    2487             :      */
    2488         398 :     if (bms_num_members(initially_valid_subplans) < n_total_subplans)
    2489             :     {
    2490          48 :         fix_subplan_map = true;
    2491             : 
    2492             :         /*
    2493             :          * First we must build a temporary array which maps old subplan
    2494             :          * indexes to new ones.  For convenience of initialization, we use
    2495             :          * 1-based indexes in this array and leave pruned items as 0.
    2496             :          */
    2497          48 :         new_subplan_indexes = palloc0_array(int, n_total_subplans);
    2498          48 :         newidx = 1;
    2499          48 :         i = -1;
    2500         186 :         while ((i = bms_next_member(initially_valid_subplans, i)) >= 0)
    2501             :         {
    2502             :             Assert(i < n_total_subplans);
    2503         138 :             new_subplan_indexes[i] = newidx++;
    2504             :         }
    2505             :     }
    2506             : 
    2507             :     /*
    2508             :      * Now we can update each PartitionedRelPruningData's subplan_map with new
    2509             :      * subplan indexes.  We must also recompute its present_parts bitmap.
    2510             :      */
    2511         820 :     for (i = 0; i < prunestate->num_partprunedata; i++)
    2512             :     {
    2513         422 :         PartitionPruningData *prunedata = prunestate->partprunedata[i];
    2514             :         int         j;
    2515             : 
    2516             :         /*
    2517             :          * Within each hierarchy, we perform this loop in back-to-front order
    2518             :          * so that we determine present_parts for the lowest-level partitioned
    2519             :          * tables first.  This way we can tell whether a sub-partitioned
    2520             :          * table's partitions were entirely pruned so we can exclude it from
    2521             :          * the current level's present_parts.
    2522             :          */
    2523        1300 :         for (j = prunedata->num_partrelprunedata - 1; j >= 0; j--)
    2524             :         {
    2525         878 :             PartitionedRelPruningData *pprune = &prunedata->partrelprunedata[j];
    2526         878 :             int         nparts = pprune->nparts;
    2527             :             int         k;
    2528             : 
    2529             :             /* Initialize PartitionPruneContext for exec pruning, if needed. */
    2530         878 :             if (pprune->exec_pruning_steps != NIL)
    2531             :             {
    2532             :                 PartitionKey partkey;
    2533             :                 PartitionDesc partdesc;
    2534             : 
    2535             :                 /*
    2536             :                  * See the comment in CreatePartitionPruneState() regarding
    2537             :                  * the usage of partdesc and partkey.
    2538             :                  */
    2539         512 :                 partkey = RelationGetPartitionKey(pprune->partrel);
    2540         512 :                 partdesc = PartitionDirectoryLookup(estate->es_partition_directory,
    2541             :                                                     pprune->partrel);
    2542             : 
    2543         512 :                 InitPartitionPruneContext(&pprune->exec_context,
    2544             :                                           pprune->exec_pruning_steps,
    2545             :                                           partdesc, partkey, parent_plan,
    2546             :                                           prunestate->econtext);
    2547             :             }
    2548             : 
    2549         878 :             if (!fix_subplan_map)
    2550         686 :                 continue;
    2551             : 
    2552             :             /* We just rebuild present_parts from scratch */
    2553         192 :             bms_free(pprune->present_parts);
    2554         192 :             pprune->present_parts = NULL;
    2555             : 
    2556         708 :             for (k = 0; k < nparts; k++)
    2557             :             {
    2558         516 :                 int         oldidx = pprune->subplan_map[k];
    2559             :                 int         subidx;
    2560             : 
    2561             :                 /*
    2562             :                  * If this partition existed as a subplan then change the old
    2563             :                  * subplan index to the new subplan index.  The new index may
    2564             :                  * become -1 if the partition was pruned above, or it may just
    2565             :                  * come earlier in the subplan list due to some subplans being
    2566             :                  * removed earlier in the list.  If it's a subpartition, add
    2567             :                  * it to present_parts unless it's entirely pruned.
    2568             :                  */
    2569         516 :                 if (oldidx >= 0)
    2570             :                 {
    2571             :                     Assert(oldidx < n_total_subplans);
    2572         396 :                     pprune->subplan_map[k] = new_subplan_indexes[oldidx] - 1;
    2573             : 
    2574         396 :                     if (new_subplan_indexes[oldidx] > 0)
    2575         114 :                         pprune->present_parts =
    2576         114 :                             bms_add_member(pprune->present_parts, k);
    2577             :                 }
    2578         120 :                 else if ((subidx = pprune->subpart_map[k]) >= 0)
    2579             :                 {
    2580             :                     PartitionedRelPruningData *subprune;
    2581             : 
    2582         120 :                     subprune = &prunedata->partrelprunedata[subidx];
    2583             : 
    2584         120 :                     if (!bms_is_empty(subprune->present_parts))
    2585          48 :                         pprune->present_parts =
    2586          48 :                             bms_add_member(pprune->present_parts, k);
    2587             :                 }
    2588             :             }
    2589             :         }
    2590             :     }
    2591             : 
    2592             :     /*
    2593             :      * If we fixed subplan maps, we must also recompute the other_subplans
    2594             :      * set, since indexes in it may change.
    2595             :      */
    2596         398 :     if (fix_subplan_map)
    2597             :     {
    2598          48 :         new_other_subplans = NULL;
    2599          48 :         i = -1;
    2600          72 :         while ((i = bms_next_member(prunestate->other_subplans, i)) >= 0)
    2601          24 :             new_other_subplans = bms_add_member(new_other_subplans,
    2602          24 :                                                 new_subplan_indexes[i] - 1);
    2603             : 
    2604          48 :         bms_free(prunestate->other_subplans);
    2605          48 :         prunestate->other_subplans = new_other_subplans;
    2606             : 
    2607          48 :         pfree(new_subplan_indexes);
    2608             :     }
    2609         398 : }
    2610             : 
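The index re-sequencing above can be illustrated standalone: surviving old subplan indexes are numbered 1..n in a scratch array (0 meaning "pruned"), and every map entry is then rewritten as scratch[old] - 1, so pruned entries conveniently become -1. The array contents below are invented for the example.

#include <stdio.h>

int
main(void)
{
    int     surviving[] = {0, 2, 4};        /* old subplan indexes kept by initial pruning */
    int     new_index[5] = {0};             /* scratch, 1-based; 0 means "pruned" */
    int     subplan_map[] = {0, 2, 3, -1};  /* one partitioned table's old map */
    int     newidx = 1;

    /* number the survivors 1..n */
    for (int i = 0; i < 3; i++)
        new_index[surviving[i]] = newidx++;

    /* rewrite the map in terms of the post-pruning subplan list */
    for (int k = 0; k < 4; k++)
    {
        int     old = subplan_map[k];

        if (old >= 0)
            subplan_map[k] = new_index[old] - 1;    /* becomes -1 if pruned */
        printf("map entry %d: %d\n", k, subplan_map[k]);
    }
    /* prints: 0, 1, -1, -1 */
    return 0;
}
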
    2611             : /*
    2612             :  * ExecFindMatchingSubPlans
    2613             :  *      Determine which subplans match the pruning steps detailed in
    2614             :  *      'prunestate' for the current comparison expression values.
    2615             :  *
    2616             :  * Pass initial_prune as true if PARAM_EXEC Params cannot yet be evaluated.  This
    2617             :  * differentiates the initial executor-time pruning step from later
    2618             :  * runtime pruning.
    2619             :  *
    2620             :  * The caller must pass a non-NULL validsubplan_rtis during initial pruning
    2621             :  * to collect the RT indexes of leaf partitions whose subnodes will be
    2622             :  * executed.  These RT indexes are later added to EState.es_unpruned_relids.
    2623             :  */
    2624             : Bitmapset *
    2625        3898 : ExecFindMatchingSubPlans(PartitionPruneState *prunestate,
    2626             :                          bool initial_prune,
    2627             :                          Bitmapset **validsubplan_rtis)
    2628             : {
    2629        3898 :     Bitmapset  *result = NULL;
    2630             :     MemoryContext oldcontext;
    2631             :     int         i;
    2632             : 
    2633             :     /*
    2634             :      * Either we're here on the initial prune done during pruning
    2635             :      * initialization, or we're at a point where PARAM_EXEC Params can be
    2636             :      * evaluated *and* there are steps in which to do so.
    2637             :      */
    2638             :     Assert(initial_prune || prunestate->do_exec_prune);
    2639             :     Assert(validsubplan_rtis != NULL || !initial_prune);
    2640             : 
    2641             :     /*
    2642             :      * Switch to a temp context to avoid leaking memory in the executor's
    2643             :      * query-lifespan memory context.
    2644             :      */
    2645        3898 :     oldcontext = MemoryContextSwitchTo(prunestate->prune_context);
    2646             : 
    2647             :     /*
    2648             :      * For each hierarchy, do the pruning tests, and add nondeletable
    2649             :      * subplans' indexes to "result".
    2650             :      */
    2651        7838 :     for (i = 0; i < prunestate->num_partprunedata; i++)
    2652             :     {
    2653        3940 :         PartitionPruningData *prunedata = prunestate->partprunedata[i];
    2654             :         PartitionedRelPruningData *pprune;
    2655             : 
    2656             :         /*
    2657             :          * We pass the zeroth item, belonging to the root table of the
    2658             :          * hierarchy, and find_matching_subplans_recurse() takes care of
    2659             :          * recursing to other (lower-level) parents as needed.
    2660             :          */
    2661        3940 :         pprune = &prunedata->partrelprunedata[0];
    2662        3940 :         find_matching_subplans_recurse(prunedata, pprune, initial_prune,
    2663             :                                        &result, validsubplan_rtis);
    2664             : 
    2665             :         /*
    2666             :          * Expression eval may have used space in ExprContext too. Avoid
    2667             :          * accessing exec_context during initial pruning, as it is not valid
    2668             :          * at that stage.
    2669             :          */
    2670        3940 :         if (!initial_prune && pprune->exec_pruning_steps)
    2671        3396 :             ResetExprContext(pprune->exec_context.exprcontext);
    2672             :     }
    2673             : 
    2674             :     /* Add in any subplans that partition pruning didn't account for */
    2675        3898 :     result = bms_add_members(result, prunestate->other_subplans);
    2676             : 
    2677        3898 :     MemoryContextSwitchTo(oldcontext);
    2678             : 
    2679             :     /* Copy result out of the temp context before we reset it */
    2680        3898 :     result = bms_copy(result);
    2681        3898 :     if (validsubplan_rtis)
    2682         448 :         *validsubplan_rtis = bms_copy(*validsubplan_rtis);
    2683             : 
    2684        3898 :     MemoryContextReset(prunestate->prune_context);
    2685             : 
    2686        3898 :     return result;
    2687             : }
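/*
 * Editorial sketch, not part of execPartition.c: one plausible way an
 * exec-time caller could consume the result of ExecFindMatchingSubPlans().
 * The function and variable names below are hypothetical; the real call
 * sites live in the Append/MergeAppend node code.
 */
static void
example_exec_time_prune(PartitionPruneState *prunestate)
{
	Bitmapset  *validsubplans;
	int			subplan = -1;

	/*
	 * Exec-time pruning: PARAM_EXEC Params can now be evaluated, so pass
	 * initial_prune = false; validsubplan_rtis is only required during the
	 * initial pruning pass, hence NULL here.
	 */
	validsubplans = ExecFindMatchingSubPlans(prunestate, false, NULL);

	/* Visit each surviving subplan index. */
	while ((subplan = bms_next_member(validsubplans, subplan)) >= 0)
	{
		/* ... run or schedule the subplan with this index ... */
	}

	bms_free(validsubplans);
}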
    2688             : 
    2689             : /*
    2690             :  * find_matching_subplans_recurse
    2691             :  *      Recursive worker function for ExecFindMatchingSubPlans
    2692             :  *
    2693             :  * Adds valid (non-prunable) subplan IDs to *validsubplans. If
    2694             :  * *validsubplan_rtis is non-NULL, it also adds the RT indexes of their
    2695             :  * corresponding partitions, but only if they are leaf partitions.
    2696             :  */
    2697             : static void
    2698        4354 : find_matching_subplans_recurse(PartitionPruningData *prunedata,
    2699             :                                PartitionedRelPruningData *pprune,
    2700             :                                bool initial_prune,
    2701             :                                Bitmapset **validsubplans,
    2702             :                                Bitmapset **validsubplan_rtis)
    2703             : {
    2704             :     Bitmapset  *partset;
    2705             :     int         i;
    2706             : 
    2707             :     /* Guard against stack overflow due to overly deep partition hierarchy. */
    2708        4354 :     check_stack_depth();
    2709             : 
    2710             :     /*
    2711             :      * Prune as appropriate, if we have pruning steps matching the current
    2712             :      * execution context.  Otherwise just include all partitions at this
    2713             :      * level.
    2714             :      */
    2715        4354 :     if (initial_prune && pprune->initial_pruning_steps)
    2716         532 :         partset = get_matching_partitions(&pprune->initial_context,
    2717             :                                           pprune->initial_pruning_steps);
    2718        3822 :     else if (!initial_prune && pprune->exec_pruning_steps)
    2719        3480 :         partset = get_matching_partitions(&pprune->exec_context,
    2720             :                                           pprune->exec_pruning_steps);
    2721             :     else
    2722         342 :         partset = pprune->present_parts;
    2723             : 
    2724             :     /* Translate partset into subplan indexes */
    2725        4354 :     i = -1;
    2726        6164 :     while ((i = bms_next_member(partset, i)) >= 0)
    2727             :     {
    2728        1810 :         if (pprune->subplan_map[i] >= 0)
    2729             :         {
    2730        2788 :             *validsubplans = bms_add_member(*validsubplans,
    2731        1394 :                                             pprune->subplan_map[i]);
    2732             : 
    2733             :             /*
    2734             :              * Only report leaf partitions. Non-leaf partitions may appear
    2735             :              * here when they use an unflattened Append or MergeAppend.
    2736             :              */
    2737        1394 :             if (validsubplan_rtis && pprune->leafpart_rti_map[i])
    2738         674 :                 *validsubplan_rtis = bms_add_member(*validsubplan_rtis,
    2739         674 :                                                     pprune->leafpart_rti_map[i]);
    2740             :         }
    2741             :         else
    2742             :         {
    2743         416 :             int         partidx = pprune->subpart_map[i];
    2744             : 
    2745         416 :             if (partidx >= 0)
    2746         414 :                 find_matching_subplans_recurse(prunedata,
    2747             :                                                &prunedata->partrelprunedata[partidx],
    2748             :                                                initial_prune, validsubplans,
    2749             :                                                validsubplan_rtis);
    2750             :             else
    2751             :             {
    2752             :                 /*
    2753             :                  * We get here if the planner already pruned all the sub-
    2754             :                  * partitions for this partition.  Silently ignore this
    2755             :                  * partition in this case.  The end result is the same: we
    2756             :                  * would have pruned all partitions just the same, but we
    2757             :                  * don't have any pruning steps to execute to verify this.
    2758             :                  */
    2759             :             }
    2760             :         }
    2761             :     }
    2762        4354 : }
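/*
 * Editorial sketch, not part of execPartition.c: a made-up example of the
 * per-level maps the recursion above consults.  Suppose one level of a
 * hierarchy has three partitions:
 *
 *   index 0  leaf, planned as subplan 2            subplan_map[0] = 2
 *   index 1  itself partitioned; its pruning data
 *            sits at partrelprunedata[1]           subpart_map[1] = 1
 *   index 2  all sub-partitions already pruned
 *            by the planner                        both maps hold -1
 *
 * The concrete values (subplan 2, RT index 5) are invented purely for
 * illustration.
 */
static const int example_subplan_map[]      = {2, -1, -1};
static const int example_subpart_map[]      = {-1, 1, -1};
static const int example_leafpart_rti_map[] = {5, 0, 0};	/* nonzero only for leaves */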

Generated by: LCOV version 1.16