LCOV - code coverage report
Current view: top level - src/backend/executor - nodeModifyTable.c (source / functions) Hit Total Coverage
Test: PostgreSQL 13devel Lines: 707 754 93.8 %
Date: 2019-11-22 08:06:54 Functions: 19 20 95.0 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * nodeModifyTable.c
       4             :  *    routines to handle ModifyTable nodes.
       5             :  *
       6             :  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/executor/nodeModifyTable.c
      12             :  *
      13             :  *-------------------------------------------------------------------------
      14             :  */
      15             : /* INTERFACE ROUTINES
      16             :  *      ExecInitModifyTable - initialize the ModifyTable node
      17             :  *      ExecModifyTable     - retrieve the next tuple from the node
      18             :  *      ExecEndModifyTable  - shut down the ModifyTable node
      19             :  *      ExecReScanModifyTable - rescan the ModifyTable node
      20             :  *
      21             :  *   NOTES
      22             :  *      Each ModifyTable node contains a list of one or more subplans,
      23             :  *      much like an Append node.  There is one subplan per result relation.
      24             :  *      The key reason for this is that in an inherited UPDATE command, each
      25             :  *      result relation could have a different schema (more or different
      26             :  *      columns) requiring a different plan tree to produce it.  In an
      27             :  *      inherited DELETE, all the subplans should produce the same output
      28             :  *      rowtype, but we might still find that different plans are appropriate
      29             :  *      for different child relations.
      30             :  *
      31             :  *      If the query specifies RETURNING, then the ModifyTable returns a
      32             :  *      RETURNING tuple after completing each row insert, update, or delete.
      33             :  *      It must be called again to continue the operation.  Without RETURNING,
      34             :  *      we just loop within the node until all the work is done, then
      35             :  *      return NULL.  This avoids useless call/return overhead.
      36             :  */
      37             : 
      38             : #include "postgres.h"
      39             : 
      40             : #include "access/heapam.h"
      41             : #include "access/htup_details.h"
      42             : #include "access/tableam.h"
      43             : #include "access/xact.h"
      44             : #include "catalog/catalog.h"
      45             : #include "commands/trigger.h"
      46             : #include "executor/execPartition.h"
      47             : #include "executor/executor.h"
      48             : #include "executor/nodeModifyTable.h"
      49             : #include "foreign/fdwapi.h"
      50             : #include "miscadmin.h"
      51             : #include "nodes/nodeFuncs.h"
      52             : #include "rewrite/rewriteHandler.h"
      53             : #include "storage/bufmgr.h"
      54             : #include "storage/lmgr.h"
      55             : #include "utils/builtins.h"
      56             : #include "utils/datum.h"
      57             : #include "utils/memutils.h"
      58             : #include "utils/rel.h"
      59             : 
      60             : 
      61             : static bool ExecOnConflictUpdate(ModifyTableState *mtstate,
      62             :                                  ResultRelInfo *resultRelInfo,
      63             :                                  ItemPointer conflictTid,
      64             :                                  TupleTableSlot *planSlot,
      65             :                                  TupleTableSlot *excludedSlot,
      66             :                                  EState *estate,
      67             :                                  bool canSetTag,
      68             :                                  TupleTableSlot **returning);
      69             : static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
      70             :                                                EState *estate,
      71             :                                                PartitionTupleRouting *proute,
      72             :                                                ResultRelInfo *targetRelInfo,
      73             :                                                TupleTableSlot *slot);
      74             : static ResultRelInfo *getTargetResultRelInfo(ModifyTableState *node);
      75             : static void ExecSetupChildParentMapForSubplan(ModifyTableState *mtstate);
      76             : static TupleConversionMap *tupconv_map_for_subplan(ModifyTableState *node,
      77             :                                                    int whichplan);
      78             : 
      79             : /*
      80             :  * Verify that the tuples to be produced by INSERT or UPDATE match the
      81             :  * target relation's rowtype
      82             :  *
      83             :  * We do this to guard against stale plans.  If plan invalidation is
      84             :  * functioning properly then we should never get a failure here, but better
      85             :  * safe than sorry.  Note that this is called after we have obtained lock
      86             :  * on the target rel, so the rowtype can't change underneath us.
      87             :  *
      88             :  * The plan output is represented by its targetlist, because that makes
      89             :  * handling the dropped-column case easier.
      90             :  */
      91             : static void
      92       71190 : ExecCheckPlanOutput(Relation resultRel, List *targetList)
      93             : {
      94       71190 :     TupleDesc   resultDesc = RelationGetDescr(resultRel);
      95       71190 :     int         attno = 0;
      96             :     ListCell   *lc;
      97             : 
      98      414016 :     foreach(lc, targetList)
      99             :     {
     100      342826 :         TargetEntry *tle = (TargetEntry *) lfirst(lc);
     101             :         Form_pg_attribute attr;
     102             : 
     103      342826 :         if (tle->resjunk)
     104       11854 :             continue;           /* ignore junk tlist items */
     105             : 
     106      330972 :         if (attno >= resultDesc->natts)
     107           0 :             ereport(ERROR,
     108             :                     (errcode(ERRCODE_DATATYPE_MISMATCH),
     109             :                      errmsg("table row type and query-specified row type do not match"),
     110             :                      errdetail("Query has too many columns.")));
     111      330972 :         attr = TupleDescAttr(resultDesc, attno);
     112      330972 :         attno++;
     113             : 
     114      330972 :         if (!attr->attisdropped)
     115             :         {
     116             :             /* Normal case: demand type match */
     117      330410 :             if (exprType((Node *) tle->expr) != attr->atttypid)
     118           0 :                 ereport(ERROR,
     119             :                         (errcode(ERRCODE_DATATYPE_MISMATCH),
     120             :                          errmsg("table row type and query-specified row type do not match"),
     121             :                          errdetail("Table has type %s at ordinal position %d, but query expects %s.",
     122             :                                    format_type_be(attr->atttypid),
     123             :                                    attno,
     124             :                                    format_type_be(exprType((Node *) tle->expr)))));
     125             :         }
     126             :         else
     127             :         {
     128             :             /*
     129             :              * For a dropped column, we can't check atttypid (it's likely 0).
     130             :              * In any case the planner has most likely inserted an INT4 null.
     131             :              * What we insist on is just *some* NULL constant.
     132             :              */
     133        1124 :             if (!IsA(tle->expr, Const) ||
     134         562 :                 !((Const *) tle->expr)->constisnull)
     135           0 :                 ereport(ERROR,
     136             :                         (errcode(ERRCODE_DATATYPE_MISMATCH),
     137             :                          errmsg("table row type and query-specified row type do not match"),
     138             :                          errdetail("Query provides a value for a dropped column at ordinal position %d.",
     139             :                                    attno)));
     140             :         }
     141             :     }
     142       71190 :     if (attno != resultDesc->natts)
     143           0 :         ereport(ERROR,
     144             :                 (errcode(ERRCODE_DATATYPE_MISMATCH),
     145             :                  errmsg("table row type and query-specified row type do not match"),
     146             :                  errdetail("Query has too few columns.")));
     147       71190 : }
     148             : 
     149             : /*
     150             :  * ExecProcessReturning --- evaluate a RETURNING list
     151             :  *
     152             :  * resultRelInfo: current result rel
     153             :  * tupleSlot: slot holding tuple actually inserted/updated/deleted
     154             :  * planSlot: slot holding tuple returned by top subplan node
     155             :  *
     156             :  * Note: If tupleSlot is NULL, the FDW should have already provided econtext's
     157             :  * scan tuple.
     158             :  *
     159             :  * Returns a slot holding the result tuple
     160             :  */
     161             : static TupleTableSlot *
     162        4346 : ExecProcessReturning(ResultRelInfo *resultRelInfo,
     163             :                      TupleTableSlot *tupleSlot,
     164             :                      TupleTableSlot *planSlot)
     165             : {
     166        4346 :     ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
     167        4346 :     ExprContext *econtext = projectReturning->pi_exprContext;
     168             : 
     169             :     /* Make tuple and any needed join variables available to ExecProject */
     170        4346 :     if (tupleSlot)
     171        3654 :         econtext->ecxt_scantuple = tupleSlot;
     172        4346 :     econtext->ecxt_outertuple = planSlot;
     173             : 
     174             :     /*
     175             :      * RETURNING expressions might reference the tableoid column, so
     176             :      * reinitialize tts_tableOid before evaluating them.
     177             :      */
     178        8692 :     econtext->ecxt_scantuple->tts_tableOid =
     179        4346 :         RelationGetRelid(resultRelInfo->ri_RelationDesc);
     180             : 
     181             :     /* Compute the RETURNING expressions */
     182        4346 :     return ExecProject(projectReturning);
     183             : }
     184             : 
     185             : /*
     186             :  * ExecCheckTupleVisible -- verify tuple is visible
     187             :  *
     188             :  * It would not be consistent with guarantees of the higher isolation levels to
     189             :  * proceed with avoiding insertion (taking speculative insertion's alternative
     190             :  * path) on the basis of another tuple that is not visible to MVCC snapshot.
     191             :  * Check for the need to raise a serialization failure, and do so as necessary.
     192             :  */
     193             : static void
     194        4984 : ExecCheckTupleVisible(EState *estate,
     195             :                       Relation rel,
     196             :                       TupleTableSlot *slot)
     197             : {
     198        4984 :     if (!IsolationUsesXactSnapshot())
     199        4924 :         return;
     200             : 
     201          60 :     if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
     202             :     {
     203             :         Datum       xminDatum;
     204             :         TransactionId xmin;
     205             :         bool        isnull;
     206             : 
     207          36 :         xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
     208             :         Assert(!isnull);
     209          36 :         xmin = DatumGetTransactionId(xminDatum);
     210             : 
     211             :         /*
     212             :          * We should not raise a serialization failure if the conflict is
     213             :          * against a tuple inserted by our own transaction, even if it's not
     214             :          * visible to our snapshot.  (This would happen, for example, if
     215             :          * conflicting keys are proposed for insertion in a single command.)
     216             :          */
     217          36 :         if (!TransactionIdIsCurrentTransactionId(xmin))
     218          20 :             ereport(ERROR,
     219             :                     (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
     220             :                      errmsg("could not serialize access due to concurrent update")));
     221             :     }
     222             : }
     223             : 
     224             : /*
     225             :  * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
     226             :  */
     227             : static void
     228         122 : ExecCheckTIDVisible(EState *estate,
     229             :                     ResultRelInfo *relinfo,
     230             :                     ItemPointer tid,
     231             :                     TupleTableSlot *tempSlot)
     232             : {
     233         122 :     Relation    rel = relinfo->ri_RelationDesc;
     234             : 
     235             :     /* Redundantly check isolation level */
     236         122 :     if (!IsolationUsesXactSnapshot())
     237          62 :         return;
     238             : 
     239          60 :     if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
     240           0 :         elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
     241          60 :     ExecCheckTupleVisible(estate, rel, tempSlot);
     242          40 :     ExecClearTuple(tempSlot);
     243             : }
     244             : 
     245             : /*
     246             :  * Compute stored generated columns for a tuple
     247             :  */
     248             : void
     249         342 : ExecComputeStoredGenerated(EState *estate, TupleTableSlot *slot)
     250             : {
     251         342 :     ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
     252         342 :     Relation    rel = resultRelInfo->ri_RelationDesc;
     253         342 :     TupleDesc   tupdesc = RelationGetDescr(rel);
     254         342 :     int         natts = tupdesc->natts;
     255             :     MemoryContext oldContext;
     256             :     Datum      *values;
     257             :     bool       *nulls;
     258             : 
     259             :     Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);
     260             : 
     261             :     /*
     262             :      * If first time through for this result relation, build expression
     263             :      * nodetrees for rel's stored generation expressions.  Keep them in the
     264             :      * per-query memory context so they'll survive throughout the query.
     265             :      */
     266         342 :     if (resultRelInfo->ri_GeneratedExprs == NULL)
     267             :     {
     268         238 :         oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
     269             : 
     270         238 :         resultRelInfo->ri_GeneratedExprs =
     271         238 :             (ExprState **) palloc(natts * sizeof(ExprState *));
     272             : 
     273         734 :         for (int i = 0; i < natts; i++)
     274             :         {
     275         496 :             if (TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED)
     276             :             {
     277             :                 Expr       *expr;
     278             : 
     279         238 :                 expr = (Expr *) build_column_default(rel, i + 1);
     280         238 :                 if (expr == NULL)
     281           0 :                     elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
     282             :                          i + 1, RelationGetRelationName(rel));
     283             : 
     284         238 :                 resultRelInfo->ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
     285             :             }
     286             :         }
     287             : 
     288         238 :         MemoryContextSwitchTo(oldContext);
     289             :     }
     290             : 
     291         342 :     oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
     292             : 
     293         342 :     values = palloc(sizeof(*values) * natts);
     294         342 :     nulls = palloc(sizeof(*nulls) * natts);
     295             : 
     296         342 :     slot_getallattrs(slot);
     297         342 :     memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);
     298             : 
     299        1046 :     for (int i = 0; i < natts; i++)
     300             :     {
     301         712 :         Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
     302             : 
     303         712 :         if (attr->attgenerated == ATTRIBUTE_GENERATED_STORED)
     304             :         {
     305             :             ExprContext *econtext;
     306             :             Datum       val;
     307             :             bool        isnull;
     308             : 
     309         342 :             econtext = GetPerTupleExprContext(estate);
     310         342 :             econtext->ecxt_scantuple = slot;
     311             : 
     312         342 :             val = ExecEvalExpr(resultRelInfo->ri_GeneratedExprs[i], econtext, &isnull);
     313             : 
     314         334 :             values[i] = val;
     315         334 :             nulls[i] = isnull;
     316             :         }
     317             :         else
     318             :         {
     319         370 :             if (!nulls[i])
     320         358 :                 values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
     321             :         }
     322             :     }
     323             : 
     324         334 :     ExecClearTuple(slot);
     325         334 :     memcpy(slot->tts_values, values, sizeof(*values) * natts);
     326         334 :     memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
     327         334 :     ExecStoreVirtualTuple(slot);
     328         334 :     ExecMaterializeSlot(slot);
     329             : 
     330         334 :     MemoryContextSwitchTo(oldContext);
     331         334 : }
     332             : 
     333             : /* ----------------------------------------------------------------
     334             :  *      ExecInsert
     335             :  *
     336             :  *      For INSERT, we have to insert the tuple into the target relation
     337             :  *      and insert appropriate tuples into the index relations.
     338             :  *
     339             :  *      Returns RETURNING result if any, otherwise NULL.
     340             :  * ----------------------------------------------------------------
     341             :  */
     342             : static TupleTableSlot *
     343    11373078 : ExecInsert(ModifyTableState *mtstate,
     344             :            TupleTableSlot *slot,
     345             :            TupleTableSlot *planSlot,
     346             :            EState *estate,
     347             :            bool canSetTag)
     348             : {
     349             :     ResultRelInfo *resultRelInfo;
     350             :     Relation    resultRelationDesc;
     351    11373078 :     List       *recheckIndexes = NIL;
     352    11373078 :     TupleTableSlot *result = NULL;
     353             :     TransitionCaptureState *ar_insert_trig_tcs;
     354    11373078 :     ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
     355    11373078 :     OnConflictAction onconflict = node->onConflictAction;
     356             : 
     357    11373078 :     ExecMaterializeSlot(slot);
     358             : 
     359             :     /*
     360             :      * get information on the (current) result relation
     361             :      */
     362    11373078 :     resultRelInfo = estate->es_result_relation_info;
     363    11373078 :     resultRelationDesc = resultRelInfo->ri_RelationDesc;
     364             : 
     365             :     /*
     366             :      * BEFORE ROW INSERT Triggers.
     367             :      *
     368             :      * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
     369             :      * INSERT ... ON CONFLICT statement.  We cannot check for constraint
     370             :      * violations before firing these triggers, because they can change the
     371             :      * values to insert.  Also, they can run arbitrary user-defined code with
     372             :      * side-effects that we can't cancel by just not inserting the tuple.
     373             :      */
     374    11422238 :     if (resultRelInfo->ri_TrigDesc &&
     375       49160 :         resultRelInfo->ri_TrigDesc->trig_insert_before_row)
     376             :     {
     377        1230 :         if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
     378          16 :             return NULL;        /* "do nothing" */
     379             :     }
     380             : 
     381             :     /* INSTEAD OF ROW INSERT Triggers */
     382    11422070 :     if (resultRelInfo->ri_TrigDesc &&
     383       49076 :         resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
     384             :     {
     385         184 :         if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
     386           4 :             return NULL;        /* "do nothing" */
     387             :     }
     388    11372900 :     else if (resultRelInfo->ri_FdwRoutine)
     389             :     {
     390             :         /*
     391             :          * Compute stored generated columns
     392             :          */
     393         752 :         if (resultRelationDesc->rd_att->constr &&
     394         360 :             resultRelationDesc->rd_att->constr->has_generated_stored)
     395           4 :             ExecComputeStoredGenerated(estate, slot);
     396             : 
     397             :         /*
     398             :          * insert into foreign table: let the FDW do it
     399             :          */
     400         392 :         slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
     401             :                                                                resultRelInfo,
     402             :                                                                slot,
     403             :                                                                planSlot);
     404             : 
     405         386 :         if (slot == NULL)       /* "do nothing" */
     406           4 :             return NULL;
     407             : 
     408             :         /*
     409             :          * AFTER ROW Triggers or RETURNING expressions might reference the
     410             :          * tableoid column, so (re-)initialize tts_tableOid before evaluating
     411             :          * them.
     412             :          */
     413         382 :         slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
     414             : 
     415             :     }
     416             :     else
     417             :     {
     418             :         WCOKind     wco_kind;
     419             : 
     420             :         /*
     421             :          * Constraints might reference the tableoid column, so (re-)initialize
     422             :          * tts_tableOid before evaluating them.
     423             :          */
     424    11372508 :         slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
     425             : 
     426             :         /*
     427             :          * Compute stored generated columns
     428             :          */
     429    17377078 :         if (resultRelationDesc->rd_att->constr &&
     430     6004570 :             resultRelationDesc->rd_att->constr->has_generated_stored)
     431         264 :             ExecComputeStoredGenerated(estate, slot);
     432             : 
     433             :         /*
     434             :          * Check any RLS WITH CHECK policies.
     435             :          *
     436             :          * Normally we should check INSERT policies. But if the insert is the
     437             :          * result of a partition key update that moved the tuple to a new
     438             :          * partition, we should instead check UPDATE policies, because we are
     439             :          * executing policies defined on the target table, and not those
     440             :          * defined on the child partitions.
     441             :          */
     442    22745000 :         wco_kind = (mtstate->operation == CMD_UPDATE) ?
     443    11372500 :             WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
     444             : 
     445             :         /*
     446             :          * ExecWithCheckOptions() will skip any WCOs which are not of the kind
     447             :          * we are looking for at this point.
     448             :          */
     449    11372500 :         if (resultRelInfo->ri_WithCheckOptions != NIL)
     450         344 :             ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
     451             : 
     452             :         /*
     453             :          * Check the constraints of the tuple.
     454             :          */
     455    11372388 :         if (resultRelationDesc->rd_att->constr)
     456     6004514 :             ExecConstraints(resultRelInfo, slot, estate);
     457             : 
     458             :         /*
     459             :          * Also check the tuple against the partition constraint, if there is
     460             :          * one; except that if we got here via tuple-routing, we don't need to
     461             :          * if there's no BR trigger defined on the partition.
     462             :          */
     463    11862914 :         if (resultRelInfo->ri_PartitionCheck &&
     464      981382 :             (resultRelInfo->ri_PartitionRoot == NULL ||
     465      491040 :              (resultRelInfo->ri_TrigDesc &&
     466         504 :               resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
     467         402 :             ExecPartitionCheck(resultRelInfo, slot, estate, true);
     468             : 
     469    11371960 :         if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
     470        3882 :         {
     471             :             /* Perform a speculative insertion. */
     472             :             uint32      specToken;
     473             :             ItemPointerData conflictTid;
     474             :             bool        specConflict;
     475             :             List       *arbiterIndexes;
     476             : 
     477        8952 :             arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
     478             : 
     479             :             /*
     480             :              * Do a non-conclusive check for conflicts first.
     481             :              *
     482             :              * We're not holding any locks yet, so this doesn't guarantee that
     483             :              * the later insert won't conflict.  But it avoids leaving behind
     484             :              * a lot of canceled speculative insertions, if you run a lot of
     485             :              * INSERT ON CONFLICT statements that do conflict.
     486             :              *
     487             :              * We loop back here if we find a conflict below, either during
     488             :              * the pre-check, or when we re-check after inserting the tuple
     489             :              * speculatively.
     490             :              */
     491             :     vlock:
     492        8960 :             specConflict = false;
     493        8960 :             if (!ExecCheckIndexConstraints(slot, estate, &conflictTid,
     494             :                                            arbiterIndexes))
     495             :             {
     496             :                 /* committed conflict tuple found */
     497        5062 :                 if (onconflict == ONCONFLICT_UPDATE)
     498             :                 {
     499             :                     /*
     500             :                      * In case of ON CONFLICT DO UPDATE, execute the UPDATE
     501             :                      * part.  Be prepared to retry if the UPDATE fails because
     502             :                      * of another concurrent UPDATE/DELETE to the conflict
     503             :                      * tuple.
     504             :                      */
     505        4940 :                     TupleTableSlot *returning = NULL;
     506             : 
     507        4940 :                     if (ExecOnConflictUpdate(mtstate, resultRelInfo,
     508             :                                              &conflictTid, planSlot, slot,
     509             :                                              estate, canSetTag, &returning))
     510             :                     {
     511        4888 :                         InstrCountTuples2(&mtstate->ps, 1);
     512        4888 :                         return returning;
     513             :                     }
     514             :                     else
     515           0 :                         goto vlock;
     516             :                 }
     517             :                 else
     518             :                 {
     519             :                     /*
     520             :                      * In case of ON CONFLICT DO NOTHING, do nothing. However,
     521             :                      * verify that the tuple is visible to the executor's MVCC
     522             :                      * snapshot at higher isolation levels.
     523             :                      *
     524             :                      * Using ExecGetReturningSlot() to store the tuple for the
     525             :                      * recheck isn't that pretty, but we can't trivially use
     526             :                      * the input slot, because it might not be of a compatible
     527             :                      * type. As there's no conflicting usage of
     528             :                      * ExecGetReturningSlot() in the DO NOTHING case...
     529             :                      */
     530             :                     Assert(onconflict == ONCONFLICT_NOTHING);
     531         122 :                     ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
     532             :                                         ExecGetReturningSlot(estate, resultRelInfo));
     533         102 :                     InstrCountTuples2(&mtstate->ps, 1);
     534         102 :                     return NULL;
     535             :                 }
     536             :             }
     537             : 
     538             :             /*
     539             :              * Before we start insertion proper, acquire our "speculative
     540             :              * insertion lock".  Others can use that to wait for us to decide
     541             :              * if we're going to go ahead with the insertion, instead of
     542             :              * waiting for the whole transaction to complete.
     543             :              */
     544        3894 :             specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
     545             : 
     546             :             /* insert the tuple, with the speculative token */
     547        3894 :             table_tuple_insert_speculative(resultRelationDesc, slot,
     548             :                                            estate->es_output_cid,
     549             :                                            0,
     550             :                                            NULL,
     551             :                                            specToken);
     552             : 
     553             :             /* insert index entries for tuple */
     554        3894 :             recheckIndexes = ExecInsertIndexTuples(slot, estate, true,
     555             :                                                    &specConflict,
     556             :                                                    arbiterIndexes);
     557             : 
     558             :             /* adjust the tuple's state accordingly */
     559        3890 :             table_tuple_complete_speculative(resultRelationDesc, slot,
     560        3890 :                                              specToken, !specConflict);
     561             : 
     562             :             /*
     563             :              * Wake up anyone waiting for our decision.  They will re-check
     564             :              * the tuple, see that it's no longer speculative, and wait on our
     565             :              * XID as if this was a regularly inserted tuple all along.  Or if
     566             :              * we killed the tuple, they will see it's dead, and proceed as if
     567             :              * the tuple never existed.
     568             :              */
     569        3890 :             SpeculativeInsertionLockRelease(GetCurrentTransactionId());
     570             : 
     571             :             /*
     572             :              * If there was a conflict, start from the beginning.  We'll do
     573             :              * the pre-check again, which will now find the conflicting tuple
     574             :              * (unless it aborts before we get there).
     575             :              */
     576        3890 :             if (specConflict)
     577             :             {
     578           8 :                 list_free(recheckIndexes);
     579           8 :                 goto vlock;
     580             :             }
     581             : 
     582             :             /* Since there was no insertion conflict, we're done */
     583             :         }
     584             :         else
     585             :         {
     586             :             /* insert the tuple normally */
     587    11363008 :             table_tuple_insert(resultRelationDesc, slot,
     588             :                                estate->es_output_cid,
     589             :                                0, NULL);
     590             : 
     591             :             /* insert index entries for tuple */
     592    11362986 :             if (resultRelInfo->ri_NumIndices > 0)
     593     4904984 :                 recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL,
     594             :                                                        NIL);
     595             :         }
     596             :     }
     597             : 
     598    11366970 :     if (canSetTag)
     599             :     {
     600    11366254 :         (estate->es_processed)++;
     601    11366254 :         setLastTid(&slot->tts_tid);
     602             :     }
     603             : 
     604             :     /*
     605             :      * If this insert is the result of a partition key update that moved the
     606             :      * tuple to a new partition, put this row into the transition NEW TABLE,
     607             :      * if there is one. We need to do this separately for DELETE and INSERT
     608             :      * because they happen on different tables.
     609             :      */
     610    11366970 :     ar_insert_trig_tcs = mtstate->mt_transition_capture;
     611    11366970 :     if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
     612          28 :         && mtstate->mt_transition_capture->tcs_update_new_table)
     613             :     {
     614          28 :         ExecARUpdateTriggers(estate, resultRelInfo, NULL,
     615             :                              NULL,
     616             :                              slot,
     617             :                              NULL,
     618          28 :                              mtstate->mt_transition_capture);
     619             : 
     620             :         /*
     621             :          * We've already captured the NEW TABLE row, so make sure any AR
     622             :          * INSERT trigger fired below doesn't capture it again.
     623             :          */
     624          28 :         ar_insert_trig_tcs = NULL;
     625             :     }
     626             : 
     627             :     /* AFTER ROW INSERT Triggers */
     628    11366970 :     ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
     629             :                          ar_insert_trig_tcs);
     630             : 
     631    11366970 :     list_free(recheckIndexes);
     632             : 
     633             :     /*
     634             :      * Check any WITH CHECK OPTION constraints from parent views.  We are
     635             :      * required to do this after testing all constraints and uniqueness
     636             :      * violations per the SQL spec, so we do it after actually inserting the
     637             :      * record into the heap and all indexes.
     638             :      *
     639             :      * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
     640             :      * tuple will never be seen, if it violates the WITH CHECK OPTION.
     641             :      *
     642             :      * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
     643             :      * are looking for at this point.
     644             :      */
     645    11366970 :     if (resultRelInfo->ri_WithCheckOptions != NIL)
     646         212 :         ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
     647             : 
     648             :     /* Process RETURNING if present */
     649    11366882 :     if (resultRelInfo->ri_projectReturning)
     650        1394 :         result = ExecProcessReturning(resultRelInfo, slot, planSlot);
     651             : 
     652    11366882 :     return result;
     653             : }
     654             : 
     655             : /* ----------------------------------------------------------------
     656             :  *      ExecDelete
     657             :  *
     658             :  *      DELETE is like UPDATE, except that we delete the tuple and no
     659             :  *      index modifications are needed.
     660             :  *
     661             :  *      When deleting from a table, tupleid identifies the tuple to
     662             :  *      delete and oldtuple is NULL.  When deleting from a view,
     663             :  *      oldtuple is passed to the INSTEAD OF triggers and identifies
     664             :  *      what to delete, and tupleid is invalid.  When deleting from a
     665             :  *      foreign table, tupleid is invalid; the FDW has to figure out
     666             :  *      which row to delete using data from the planSlot.  oldtuple is
     667             :  *      passed to foreign table triggers; it is NULL when the foreign
     668             :  *      table has no relevant triggers.  We use tupleDeleted to indicate
      669             :  *      whether the tuple is actually deleted; callers can use it to
     670             :  *      decide whether to continue the operation.  When this DELETE is a
     671             :  *      part of an UPDATE of partition-key, then the slot returned by
     672             :  *      EvalPlanQual() is passed back using output parameter epqslot.
     673             :  *
     674             :  *      Returns RETURNING result if any, otherwise NULL.
     675             :  * ----------------------------------------------------------------
     676             :  */
      677             : static TupleTableSlot *
      678      831432 : ExecDelete(ModifyTableState *mtstate,
      679             :            ItemPointer tupleid,
      680             :            HeapTuple oldtuple,
      681             :            TupleTableSlot *planSlot,
      682             :            EPQState *epqstate,
      683             :            EState *estate,
      684             :            bool processReturning,
      685             :            bool canSetTag,
      686             :            bool changingPart,
      687             :            bool *tupleDeleted,
      688             :            TupleTableSlot **epqreturnslot)
      689             : {
      690             :     ResultRelInfo *resultRelInfo;
      691             :     Relation    resultRelationDesc;
      692             :     TM_Result   result;
      693             :     TM_FailureData tmfd;
      694      831432 :     TupleTableSlot *slot = NULL;
      695             :     TransitionCaptureState *ar_delete_trig_tcs;
      696             : 
                      :     /* Initialize the output flag pessimistically; set true only on success. */
      697      831432 :     if (tupleDeleted)
      698         328 :         *tupleDeleted = false;
      699             : 
      700             :     /*
      701             :      * get information on the (current) result relation
      702             :      */
      703      831432 :     resultRelInfo = estate->es_result_relation_info;
      704      831432 :     resultRelationDesc = resultRelInfo->ri_RelationDesc;
      705             : 
      706             :     /* BEFORE ROW DELETE Triggers */
      707      835708 :     if (resultRelInfo->ri_TrigDesc &&
      708        4276 :         resultRelInfo->ri_TrigDesc->trig_delete_before_row)
      709             :     {
      710             :         bool        dodelete;
      711             : 
      712         216 :         dodelete = ExecBRDeleteTriggers(estate, epqstate, resultRelInfo,
      713             :                                         tupleid, oldtuple, epqreturnslot);
      714             : 
      715         192 :         if (!dodelete)          /* "do nothing" */
      716          22 :             return NULL;
      717             :     }
      718             : 
      719             :     /* INSTEAD OF ROW DELETE Triggers */
      720      835616 :     if (resultRelInfo->ri_TrigDesc &&
      721        4230 :         resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
      722          34 :     {
      723             :         bool        dodelete;
      724             : 
      725             :         Assert(oldtuple != NULL);
      726          38 :         dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
      727             : 
      728          38 :         if (!dodelete)          /* "do nothing" */
      729           4 :             return NULL;
      730             :     }
      731      831348 :     else if (resultRelInfo->ri_FdwRoutine)
      732             :     {
      733             :         /*
      734             :          * delete from foreign table: let the FDW do it
      735             :          *
      736             :          * We offer the returning slot as a place to store RETURNING data,
      737             :          * although the FDW can return some other slot if it wants.
      738             :          */
      739          16 :         slot = ExecGetReturningSlot(estate, resultRelInfo);
      740          16 :         slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
      741             :                                                                resultRelInfo,
      742             :                                                                slot,
      743             :                                                                planSlot);
      744             : 
      745          16 :         if (slot == NULL)       /* "do nothing" */
      746           0 :             return NULL;
      747             : 
      748             :         /*
      749             :          * RETURNING expressions might reference the tableoid column, so
      750             :          * (re)initialize tts_tableOid before evaluating them.
      751             :          */
      752          16 :         if (TTS_EMPTY(slot))
      753           0 :             ExecStoreAllNullTuple(slot);
      754             : 
      755          16 :         slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
      756             :     }
      757             :     else
      758             :     {
      759             :         /*
      760             :          * delete the tuple
      761             :          *
      762             :          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
      763             :          * that the row to be deleted is visible to that snapshot, and throw a
      764             :          * can't-serialize error if not. This is a special-case behavior
      765             :          * needed for referential integrity updates in transaction-snapshot
      766             :          * mode transactions.
      767             :          */
                      :         /* Retry target: a successful EvalPlanQual recheck below jumps back here. */
      768             : ldelete:;
      769      831336 :         result = table_tuple_delete(resultRelationDesc, tupleid,
      770             :                                     estate->es_output_cid,
      771             :                                     estate->es_snapshot,
      772             :                                     estate->es_crosscheck_snapshot,
      773             :                                     true /* wait for commit */ ,
      774             :                                     &tmfd,
      775             :                                     changingPart);
      776             : 
      777      831300 :         switch (result)
      778             :         {
      779             :             case TM_SelfModified:
      780             : 
      781             :                 /*
      782             :                  * The target tuple was already updated or deleted by the
      783             :                  * current command, or by a later command in the current
      784             :                  * transaction.  The former case is possible in a join DELETE
      785             :                  * where multiple tuples join to the same target tuple. This
      786             :                  * is somewhat questionable, but Postgres has always allowed
      787             :                  * it: we just ignore additional deletion attempts.
      788             :                  *
      789             :                  * The latter case arises if the tuple is modified by a
      790             :                  * command in a BEFORE trigger, or perhaps by a command in a
      791             :                  * volatile function used in the query.  In such situations we
      792             :                  * should not ignore the deletion, but it is equally unsafe to
      793             :                  * proceed.  We don't want to discard the original DELETE
      794             :                  * while keeping the triggered actions based on its deletion;
      795             :                  * and it would be no better to allow the original DELETE
      796             :                  * while discarding updates that it triggered.  The row update
      797             :                  * carries some information that might be important according
      798             :                  * to business rules; so throwing an error is the only safe
      799             :                  * course.
      800             :                  *
      801             :                  * If a trigger actually intends this type of interaction, it
      802             :                  * can re-execute the DELETE and then return NULL to cancel
      803             :                  * the outer delete.
      804             :                  */
      805          20 :                 if (tmfd.cmax != estate->es_output_cid)
      806           4 :                     ereport(ERROR,
      807             :                             (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
      808             :                              errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
      809             :                              errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
      810             : 
      811             :                 /* Else, already deleted by self; nothing to do */
      812          16 :                 return NULL;
      813             : 
      814             :             case TM_Ok:
      815      831256 :                 break;
      816             : 
      817             :             case TM_Updated:
      818             :                 {
      819             :                     TupleTableSlot *inputslot;
      820             :                     TupleTableSlot *epqslot;
      821             : 
      822          20 :                     if (IsolationUsesXactSnapshot())
      823           0 :                         ereport(ERROR,
      824             :                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
      825             :                                  errmsg("could not serialize access due to concurrent update")));
      826             : 
      827             :                     /*
      828             :                      * Already know that we're going to need to do EPQ, so
      829             :                      * fetch tuple directly into the right slot.
      830             :                      */
      831          20 :                     EvalPlanQualBegin(epqstate);
      832          20 :                     inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
      833             :                                                  resultRelInfo->ri_RangeTableIndex);
      834             : 
      835          20 :                     result = table_tuple_lock(resultRelationDesc, tupleid,
      836             :                                               estate->es_snapshot,
      837             :                                               inputslot, estate->es_output_cid,
      838             :                                               LockTupleExclusive, LockWaitBlock,
      839             :                                               TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
      840             :                                               &tmfd);
      841             : 
      842          18 :                     switch (result)
      843             :                     {
      844             :                         case TM_Ok:
      845             :                             Assert(tmfd.traversed);
      846          12 :                             epqslot = EvalPlanQual(epqstate,
      847             :                                                    resultRelationDesc,
      848             :                                                    resultRelInfo->ri_RangeTableIndex,
      849             :                                                    inputslot);
      850          12 :                             if (TupIsNull(epqslot))
      851             :                                 /* Tuple not passing quals anymore, exiting... */
      852           4 :                                 return NULL;
      853             : 
      854             :                             /*
      855             :                              * If requested, skip delete and pass back the
      856             :                              * updated row.
      857             :                              */
      858           8 :                             if (epqreturnslot)
      859             :                             {
      860           4 :                                 *epqreturnslot = epqslot;
      861           4 :                                 return NULL;
      862             :                             }
      863             :                             else
      864           4 :                                 goto ldelete;
      865             : 
      866             :                         case TM_SelfModified:
      867             : 
      868             :                             /*
      869             :                              * This can be reached when following an update
      870             :                              * chain from a tuple updated by another session,
      871             :                              * reaching a tuple that was already updated in
      872             :                              * this transaction. If previously updated by this
      873             :                              * command, ignore the delete, otherwise error
      874             :                              * out.
      875             :                              *
      876             :                              * See also TM_SelfModified response to
      877             :                              * table_tuple_delete() above.
      878             :                              */
      879           4 :                             if (tmfd.cmax != estate->es_output_cid)
      880           2 :                                 ereport(ERROR,
      881             :                                         (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
      882             :                                          errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
      883             :                                          errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
      884             :                             return NULL;
      885             : 
      886             :                         case TM_Deleted:
      887             :                             /* tuple already deleted; nothing to do */
      888           2 :                             return NULL;
      889             : 
      890             :                         default:
      891             : 
      892             :                             /*
      893             :                              * TM_Invisible should be impossible because we're
      894             :                              * waiting for updated row versions, and would
      895             :                              * already have errored out if the first version
      896             :                              * is invisible.
      897             :                              *
      898             :                              * TM_Updated should be impossible, because we're
      899             :                              * locking the latest version via
      900             :                              * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
      901             :                              */
      902           0 :                             elog(ERROR, "unexpected table_tuple_lock status: %u",
      903             :                                  result);
      904             :                             return NULL;
      905             :                     }
      906             : 
      907             :                     Assert(false);
      908             :                     break;
      909             :                 }
      910             : 
      911             :             case TM_Deleted:
      912           4 :                 if (IsolationUsesXactSnapshot())
      913           0 :                     ereport(ERROR,
      914             :                             (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
      915             :                              errmsg("could not serialize access due to concurrent delete")));
      916             :                 /* tuple already deleted; nothing to do */
      917           4 :                 return NULL;
      918             : 
      919             :             default:
      920           0 :                 elog(ERROR, "unrecognized table_tuple_delete status: %u",
      921             :                      result);
      922             :                 return NULL;
      923             :         }
      924             : 
      925             :         /*
      926             :          * Note: Normally one would think that we have to delete index tuples
      927             :          * associated with the heap tuple now...
      928             :          *
      929             :          * ... but in POSTGRES, we have no need to do this because VACUUM will
      930             :          * take care of it later.  We can't delete index tuples immediately
      931             :          * anyway, since the tuple is still visible to other transactions.
      932             :          */
      933             :     }
      934             : 
      935      831306 :     if (canSetTag)
      936      830872 :         (estate->es_processed)++;
      937             : 
      938             :     /* Tell caller that the delete actually happened. */
      939      831306 :     if (tupleDeleted)
      940         296 :         *tupleDeleted = true;
      941             : 
      942             :     /*
      943             :      * If this delete is the result of a partition key update that moved the
      944             :      * tuple to a new partition, put this row into the transition OLD TABLE,
      945             :      * if there is one. We need to do this separately for DELETE and INSERT
      946             :      * because they happen on different tables.
      947             :      */
      948      831306 :     ar_delete_trig_tcs = mtstate->mt_transition_capture;
      949      831306 :     if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
      950          28 :         && mtstate->mt_transition_capture->tcs_update_old_table)
      951             :     {
      952          28 :         ExecARUpdateTriggers(estate, resultRelInfo,
      953             :                              tupleid,
      954             :                              oldtuple,
      955             :                              NULL,
      956             :                              NULL,
      957          28 :                              mtstate->mt_transition_capture);
      958             : 
      959             :         /*
      960             :          * We've already captured the OLD TABLE row, so make sure any AR
      961             :          * DELETE trigger fired below doesn't capture it again.
      962             :          */
      963          28 :         ar_delete_trig_tcs = NULL;
      964             :     }
      965             : 
      966             :     /* AFTER ROW DELETE Triggers */
      967      831306 :     ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
      968             :                          ar_delete_trig_tcs);
      969             : 
      970             :     /* Process RETURNING if present and if requested */
      971      831306 :     if (processReturning && resultRelInfo->ri_projectReturning)
      972             :     {
      973             :         /*
      974             :          * We have to put the target tuple into a slot, which means first we
      975             :          * gotta fetch it.  We can use the trigger tuple slot.
      976             :          */
      977             :         TupleTableSlot *rslot;
      978             : 
      979         632 :         if (resultRelInfo->ri_FdwRoutine)
      980             :         {
      981             :             /* FDW must have provided a slot containing the deleted row */
      982             :             Assert(!TupIsNull(slot));
      983             :         }
      984             :         else
      985             :         {
      986         630 :             slot = ExecGetReturningSlot(estate, resultRelInfo);
      987         630 :             if (oldtuple != NULL)
      988             :             {
      989          16 :                 ExecForceStoreHeapTuple(oldtuple, slot, false);
      990             :             }
      991             :             else
      992             :             {
      993         614 :                 if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
      994             :                                                    SnapshotAny, slot))
      995           0 :                     elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
      996             :             }
      997             :         }
      998             : 
      999         632 :         rslot = ExecProcessReturning(resultRelInfo, slot, planSlot);
     1000             : 
     1001             :         /*
     1002             :          * Before releasing the target tuple again, make sure rslot has a
     1003             :          * local copy of any pass-by-reference values.
     1004             :          */
     1005         632 :         ExecMaterializeSlot(rslot);
     1006             : 
     1007         632 :         ExecClearTuple(slot);
     1008             : 
     1009         632 :         return rslot;
     1010             :     }
     1011             : 
     1012      830674 :     return NULL;
     1013             : }
    1014             : 
    1015             : /* ----------------------------------------------------------------
    1016             :  *      ExecUpdate
    1017             :  *
    1018             :  *      note: we can't run UPDATE queries with transactions
    1019             :  *      off because UPDATEs are actually INSERTs and our
    1020             :  *      scan will mistakenly loop forever, updating the tuple
    1021             :  *      it just inserted..  This should be fixed but until it
    1022             :  *      is, we don't want to get stuck in an infinite loop
    1023             :  *      which corrupts your database..
    1024             :  *
    1025             :  *      When updating a table, tupleid identifies the tuple to
    1026             :  *      update and oldtuple is NULL.  When updating a view, oldtuple
    1027             :  *      is passed to the INSTEAD OF triggers and identifies what to
    1028             :  *      update, and tupleid is invalid.  When updating a foreign table,
    1029             :  *      tupleid is invalid; the FDW has to figure out which row to
    1030             :  *      update using data from the planSlot.  oldtuple is passed to
    1031             :  *      foreign table triggers; it is NULL when the foreign table has
    1032             :  *      no relevant triggers.
    1033             :  *
    1034             :  *      Returns RETURNING result if any, otherwise NULL.
    1035             :  * ----------------------------------------------------------------
    1036             :  */
    1037             : static TupleTableSlot *
    1038      118968 : ExecUpdate(ModifyTableState *mtstate,
    1039             :            ItemPointer tupleid,
    1040             :            HeapTuple oldtuple,
    1041             :            TupleTableSlot *slot,
    1042             :            TupleTableSlot *planSlot,
    1043             :            EPQState *epqstate,
    1044             :            EState *estate,
    1045             :            bool canSetTag)
    1046             : {
    1047             :     ResultRelInfo *resultRelInfo;
    1048             :     Relation    resultRelationDesc;
    1049             :     TM_Result   result;
    1050             :     TM_FailureData tmfd;
    1051      118968 :     List       *recheckIndexes = NIL;
    1052      118968 :     TupleConversionMap *saved_tcs_map = NULL;
    1053             : 
    1054             :     /*
    1055             :      * abort the operation if not running transactions
    1056             :      */
    1057      118968 :     if (IsBootstrapProcessingMode())
    1058           0 :         elog(ERROR, "cannot UPDATE during bootstrap");
    1059             : 
    1060      118968 :     ExecMaterializeSlot(slot);
    1061             : 
    1062             :     /*
    1063             :      * get information on the (current) result relation
    1064             :      */
    1065      118968 :     resultRelInfo = estate->es_result_relation_info;
    1066      118968 :     resultRelationDesc = resultRelInfo->ri_RelationDesc;
    1067             : 
    1068             :     /* BEFORE ROW UPDATE Triggers */
    1069      122878 :     if (resultRelInfo->ri_TrigDesc &&
    1070        3910 :         resultRelInfo->ri_TrigDesc->trig_update_before_row)
    1071             :     {
    1072        1918 :         if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
    1073             :                                   tupleid, oldtuple, slot))
    1074          96 :             return NULL;        /* "do nothing" */
    1075             :     }
    1076             : 
    1077             :     /* INSTEAD OF ROW UPDATE Triggers */
    1078      122622 :     if (resultRelInfo->ri_TrigDesc &&
    1079        3782 :         resultRelInfo->ri_TrigDesc->trig_update_instead_row)
    1080             :     {
    1081         140 :         if (!ExecIRUpdateTriggers(estate, resultRelInfo,
    1082             :                                   oldtuple, slot))
    1083          12 :             return NULL;        /* "do nothing" */
    1084             :     }
    1085      118762 :     else if (resultRelInfo->ri_FdwRoutine)
    1086             :     {
    1087             :         /*
    1088             :          * Compute stored generated columns
    1089             :          */
    1090         138 :         if (resultRelationDesc->rd_att->constr &&
    1091          56 :             resultRelationDesc->rd_att->constr->has_generated_stored)
    1092           2 :             ExecComputeStoredGenerated(estate, slot);
    1093             : 
    1094             :         /*
    1095             :          * update in foreign table: let the FDW do it
    1096             :          */
    1097          82 :         slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
    1098             :                                                                resultRelInfo,
    1099             :                                                                slot,
    1100             :                                                                planSlot);
    1101             : 
    1102          82 :         if (slot == NULL)       /* "do nothing" */
    1103           2 :             return NULL;
    1104             : 
    1105             :         /*
    1106             :          * AFTER ROW Triggers or RETURNING expressions might reference the
    1107             :          * tableoid column, so (re-)initialize tts_tableOid before evaluating
    1108             :          * them.
    1109             :          */
    1110          80 :         slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
    1111             :     }
    1112             :     else
    1113             :     {
    1114             :         LockTupleMode lockmode;
    1115             :         bool        partition_constraint_failed;
    1116             :         bool        update_indexes;
    1117             : 
    1118             :         /*
    1119             :          * Constraints might reference the tableoid column, so (re-)initialize
    1120             :          * tts_tableOid before evaluating them.
    1121             :          */
    1122      118680 :         slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
    1123             : 
    1124             :         /*
    1125             :          * Compute stored generated columns
    1126             :          */
    1127      180776 :         if (resultRelationDesc->rd_att->constr &&
    1128       62096 :             resultRelationDesc->rd_att->constr->has_generated_stored)
    1129          44 :             ExecComputeStoredGenerated(estate, slot);
    1130             : 
    1131             :         /*
    1132             :          * Check any RLS UPDATE WITH CHECK policies
    1133             :          *
    1134             :          * If we generate a new candidate tuple after EvalPlanQual testing, we
    1135             :          * must loop back here and recheck any RLS policies and constraints.
    1136             :          * (We don't need to redo triggers, however.  If there are any BEFORE
    1137             :          * triggers then trigger.c will have done table_tuple_lock to lock the
    1138             :          * correct tuple, so there's no need to do them again.)
    1139             :          */
    1140             : lreplace:;
    1141             : 
    1142             :         /* ensure slot is independent, consider e.g. EPQ */
    1143      118752 :         ExecMaterializeSlot(slot);
    1144             : 
    1145             :         /*
    1146             :          * If partition constraint fails, this row might get moved to another
    1147             :          * partition, in which case we should check the RLS CHECK policy just
    1148             :          * before inserting into the new partition, rather than doing it here.
    1149             :          * This is because a trigger on that partition might again change the
    1150             :          * row.  So skip the WCO checks if the partition constraint fails.
    1151             :          */
    1152      118752 :         partition_constraint_failed =
    1153      119766 :             resultRelInfo->ri_PartitionCheck &&
    1154        1014 :             !ExecPartitionCheck(resultRelInfo, slot, estate, false);
    1155             : 
    1156      237148 :         if (!partition_constraint_failed &&
    1157      118396 :             resultRelInfo->ri_WithCheckOptions != NIL)
    1158             :         {
    1159             :             /*
    1160             :              * ExecWithCheckOptions() will skip any WCOs which are not of the
    1161             :              * kind we are looking for at this point.
    1162             :              */
    1163         284 :             ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
    1164             :                                  resultRelInfo, slot, estate);
    1165             :         }
    1166             : 
    1167             :         /*
    1168             :          * If a partition check failed, try to move the row into the right
    1169             :          * partition.
    1170             :          */
    1171      118724 :         if (partition_constraint_failed)
    1172             :         {
    1173             :             bool        tuple_deleted;
    1174             :             TupleTableSlot *ret_slot;
    1175         356 :             TupleTableSlot *epqslot = NULL;
    1176         356 :             PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
    1177             :             int         map_index;
    1178             :             TupleConversionMap *tupconv_map;
    1179             : 
    1180             :             /*
    1181             :              * Disallow an INSERT ON CONFLICT DO UPDATE that causes the
    1182             :              * original row to migrate to a different partition.  Maybe this
    1183             :              * can be implemented some day, but it seems a fringe feature with
    1184             :              * little redeeming value.
    1185             :              */
    1186         356 :             if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
    1187           0 :                 ereport(ERROR,
    1188             :                         (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    1189             :                          errmsg("invalid ON UPDATE specification"),
    1190             :                          errdetail("The result tuple would appear in a different partition than the original tuple.")));
    1191             : 
    1192             :             /*
    1193             :              * When an UPDATE is run on a leaf partition, we will not have
    1194             :              * partition tuple routing set up. In that case, fail with
    1195             :              * partition constraint violation error.
    1196             :              */
    1197         356 :             if (proute == NULL)
    1198          28 :                 ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
    1199             : 
    1200             :             /*
    1201             :              * Row movement, part 1.  Delete the tuple, but skip RETURNING
    1202             :              * processing. We want to return rows from INSERT.
    1203             :              */
    1204         328 :             ExecDelete(mtstate, tupleid, oldtuple, planSlot, epqstate,
    1205             :                        estate, false, false /* canSetTag */ ,
    1206             :                        true /* changingPart */ , &tuple_deleted, &epqslot);
    1207             : 
    1208             :             /*
    1209             :              * For some reason if DELETE didn't happen (e.g. trigger prevented
    1210             :              * it, or it was already deleted by self, or it was concurrently
    1211             :              * deleted by another transaction), then we should skip the insert
    1212             :              * as well; otherwise, an UPDATE could cause an increase in the
    1213             :              * total number of rows across all partitions, which is clearly
    1214             :              * wrong.
    1215             :              *
    1216             :              * For a normal UPDATE, the case where the tuple has been the
    1217             :              * subject of a concurrent UPDATE or DELETE would be handled by
    1218             :              * the EvalPlanQual machinery, but for an UPDATE that we've
    1219             :              * translated into a DELETE from this partition and an INSERT into
    1220             :              * some other partition, that's not available, because CTID chains
    1221             :              * can't span relation boundaries.  We mimic the semantics to a
    1222             :              * limited extent by skipping the INSERT if the DELETE fails to
    1223             :              * find a tuple. This ensures that two concurrent attempts to
    1224             :              * UPDATE the same tuple at the same time can't turn one tuple
    1225             :              * into two, and that an UPDATE of a just-deleted tuple can't
    1226             :              * resurrect it.
    1227             :              */
    1228         328 :             if (!tuple_deleted)
    1229             :             {
    1230             :                 /*
    1231             :                  * epqslot will be typically NULL.  But when ExecDelete()
    1232             :                  * finds that another transaction has concurrently updated the
    1233             :                  * same row, it re-fetches the row, skips the delete, and
    1234             :                  * epqslot is set to the re-fetched tuple slot. In that case,
    1235             :                  * we need to do all the checks again.
    1236             :                  */
    1237          32 :                 if (TupIsNull(epqslot))
    1238         318 :                     return NULL;
    1239             :                 else
    1240             :                 {
    1241           6 :                     slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot);
    1242           6 :                     goto lreplace;
    1243             :                 }
    1244             :             }
    1245             : 
    1246             :             /*
    1247             :              * Updates set the transition capture map only when a new subplan
    1248             :              * is chosen.  But for inserts, it is set for each row. So after
    1249             :              * INSERT, we need to revert back to the map created for UPDATE;
    1250             :              * otherwise the next UPDATE will incorrectly use the one created
    1251             :              * for INSERT.  So first save the one created for UPDATE.
    1252             :              */
    1253         296 :             if (mtstate->mt_transition_capture)
    1254          28 :                 saved_tcs_map = mtstate->mt_transition_capture->tcs_map;
    1255             : 
    1256             :             /*
    1257             :              * resultRelInfo is one of the per-subplan resultRelInfos.  So we
    1258             :              * should convert the tuple into root's tuple descriptor, since
    1259             :              * ExecInsert() starts the search from root.  The tuple conversion
    1260             :              * map list is in the order of mtstate->resultRelInfo[], so to
    1261             :              * retrieve the one for this resultRel, we need to know the
    1262             :              * position of the resultRel in mtstate->resultRelInfo[].
    1263             :              */
    1264         296 :             map_index = resultRelInfo - mtstate->resultRelInfo;
    1265             :             Assert(map_index >= 0 && map_index < mtstate->mt_nplans);
    1266         296 :             tupconv_map = tupconv_map_for_subplan(mtstate, map_index);
    1267         296 :             if (tupconv_map != NULL)
    1268         108 :                 slot = execute_attr_map_slot(tupconv_map->attrMap,
    1269             :                                              slot,
    1270             :                                              mtstate->mt_root_tuple_slot);
    1271             : 
    1272             :             /*
    1273             :              * Prepare for tuple routing, making it look like we're inserting
    1274             :              * into the root.
    1275             :              */
    1276             :             Assert(mtstate->rootResultRelInfo != NULL);
    1277         296 :             slot = ExecPrepareTupleRouting(mtstate, estate, proute,
    1278             :                                            mtstate->rootResultRelInfo, slot);
    1279             : 
    1280         286 :             ret_slot = ExecInsert(mtstate, slot, planSlot,
    1281             :                                   estate, canSetTag);
    1282             : 
    1283             :             /* Revert ExecPrepareTupleRouting's node change. */
    1284         266 :             estate->es_result_relation_info = resultRelInfo;
    1285         266 :             if (mtstate->mt_transition_capture)
    1286             :             {
    1287          28 :                 mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
    1288          28 :                 mtstate->mt_transition_capture->tcs_map = saved_tcs_map;
    1289             :             }
    1290             : 
    1291         266 :             return ret_slot;
    1292             :         }
    1293             : 
    1294             :         /*
    1295             :          * Check the constraints of the tuple.  We've already checked the
    1296             :          * partition constraint above; however, we must still ensure the tuple
    1297             :          * passes all other constraints, so we will call ExecConstraints() and
    1298             :          * have it validate all remaining checks.
    1299             :          */
    1300      118368 :         if (resultRelationDesc->rd_att->constr)
    1301       62066 :             ExecConstraints(resultRelInfo, slot, estate);
    1302             : 
    1303             :         /*
    1304             :          * replace the heap tuple
    1305             :          *
    1306             :          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
    1307             :          * that the row to be updated is visible to that snapshot, and throw a
    1308             :          * can't-serialize error if not. This is a special-case behavior
    1309             :          * needed for referential integrity updates in transaction-snapshot
    1310             :          * mode transactions.
    1311             :          */
    1312      118352 :         result = table_tuple_update(resultRelationDesc, tupleid, slot,
    1313             :                                     estate->es_output_cid,
    1314             :                                     estate->es_snapshot,
    1315             :                                     estate->es_crosscheck_snapshot,
    1316             :                                     true /* wait for commit */ ,
    1317             :                                     &tmfd, &lockmode, &update_indexes);
    1318             : 
    1319      118328 :         switch (result)
    1320             :         {
    1321             :             case TM_SelfModified:
    1322             : 
    1323             :                 /*
    1324             :                  * The target tuple was already updated or deleted by the
    1325             :                  * current command, or by a later command in the current
    1326             :                  * transaction.  The former case is possible in a join UPDATE
    1327             :                  * where multiple tuples join to the same target tuple. This
    1328             :                  * is pretty questionable, but Postgres has always allowed it:
    1329             :                  * we just execute the first update action and ignore
    1330             :                  * additional update attempts.
    1331             :                  *
    1332             :                  * The latter case arises if the tuple is modified by a
    1333             :                  * command in a BEFORE trigger, or perhaps by a command in a
    1334             :                  * volatile function used in the query.  In such situations we
    1335             :                  * should not ignore the update, but it is equally unsafe to
    1336             :                  * proceed.  We don't want to discard the original UPDATE
    1337             :                  * while keeping the triggered actions based on it; and we
    1338             :                  * have no principled way to merge this update with the
    1339             :                  * previous ones.  So throwing an error is the only safe
    1340             :                  * course.
    1341             :                  *
    1342             :                  * If a trigger actually intends this type of interaction, it
    1343             :                  * can re-execute the UPDATE (assuming it can figure out how)
    1344             :                  * and then return NULL to cancel the outer update.
    1345             :                  */
    1346          56 :                 if (tmfd.cmax != estate->es_output_cid)
    1347           4 :                     ereport(ERROR,
    1348             :                             (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
    1349             :                              errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
    1350             :                              errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
    1351             : 
    1352             :                 /* Else, already updated by self; nothing to do */
    1353          52 :                 return NULL;
    1354             : 
    1355             :             case TM_Ok:
    1356      118184 :                 break;
    1357             : 
    1358             :             case TM_Updated:
    1359             :                 {
    1360             :                     TupleTableSlot *inputslot;
    1361             :                     TupleTableSlot *epqslot;
    1362             : 
    1363          84 :                     if (IsolationUsesXactSnapshot())
    1364           0 :                         ereport(ERROR,
    1365             :                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
    1366             :                                  errmsg("could not serialize access due to concurrent update")));
    1367             : 
    1368             :                     /*
    1369             :                      * Already know that we're going to need to do EPQ, so
    1370             :                      * fetch tuple directly into the right slot.
    1371             :                      */
    1372          84 :                     inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
    1373             :                                                  resultRelInfo->ri_RangeTableIndex);
    1374             : 
    1375          84 :                     result = table_tuple_lock(resultRelationDesc, tupleid,
    1376             :                                               estate->es_snapshot,
    1377             :                                               inputslot, estate->es_output_cid,
    1378             :                                               lockmode, LockWaitBlock,
    1379             :                                               TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
    1380             :                                               &tmfd);
    1381             : 
    1382          80 :                     switch (result)
    1383             :                     {
    1384             :                         case TM_Ok:
    1385             :                             Assert(tmfd.traversed);
    1386             : 
    1387          72 :                             epqslot = EvalPlanQual(epqstate,
    1388             :                                                    resultRelationDesc,
    1389             :                                                    resultRelInfo->ri_RangeTableIndex,
    1390             :                                                    inputslot);
    1391          72 :                             if (TupIsNull(epqslot))
    1392             :                                 /* Tuple not passing quals anymore, exiting... */
    1393           6 :                                 return NULL;
    1394             : 
    1395          66 :                             slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot);
    1396          66 :                             goto lreplace;
    1397             : 
    1398             :                         case TM_Deleted:
    1399             :                             /* tuple already deleted; nothing to do */
    1400           2 :                             return NULL;
    1401             : 
    1402             :                         case TM_SelfModified:
    1403             : 
    1404             :                             /*
    1405             :                              * This can be reached when following an update
    1406             :                              * chain from a tuple updated by another session,
    1407             :                              * reaching a tuple that was already updated in
    1408             :                              * this transaction. If previously modified by
    1409             :                              * this command, ignore the redundant update,
    1410             :                              * otherwise error out.
    1411             :                              *
    1412             :                              * See also TM_SelfModified response to
    1413             :                              * table_tuple_update() above.
    1414             :                              */
    1415           6 :                             if (tmfd.cmax != estate->es_output_cid)
    1416           2 :                                 ereport(ERROR,
    1417             :                                         (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
    1418             :                                          errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
    1419             :                                          errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
    1420           4 :                             return NULL;
    1421             : 
    1422             :                         default:
    1423             :                             /* see table_tuple_lock call in ExecDelete() */
    1424           0 :                             elog(ERROR, "unexpected table_tuple_lock status: %u",
    1425             :                                  result);
    1426             :                             return NULL;
    1427             :                     }
    1428             :                 }
    1429             : 
    1430             :                 break;
    1431             : 
    1432             :             case TM_Deleted:
    1433           4 :                 if (IsolationUsesXactSnapshot())
    1434           0 :                     ereport(ERROR,
    1435             :                             (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
    1436             :                              errmsg("could not serialize access due to concurrent delete")));
    1437             :                 /* tuple already deleted; nothing to do */
    1438           4 :                 return NULL;
    1439             : 
    1440             :             default:
    1441           0 :                 elog(ERROR, "unrecognized table_tuple_update status: %u",
    1442             :                      result);
    1443             :                 return NULL;
    1444             :         }
    1445             : 
    1446             :         /* insert index entries for tuple if necessary */
    1447      118184 :         if (resultRelInfo->ri_NumIndices > 0 && update_indexes)
    1448       60748 :             recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL, NIL);
    1449             :     }
    1450             : 
    1451      118314 :     if (canSetTag)
    1452      117918 :         (estate->es_processed)++;
    1453             : 
    1454             :     /* AFTER ROW UPDATE Triggers */
    1455      118314 :     ExecARUpdateTriggers(estate, resultRelInfo, tupleid, oldtuple, slot,
    1456             :                          recheckIndexes,
    1457      118314 :                          mtstate->operation == CMD_INSERT ?
    1458             :                          mtstate->mt_oc_transition_capture :
    1459             :                          mtstate->mt_transition_capture);
    1460             : 
    1461      118314 :     list_free(recheckIndexes);
    1462             : 
    1463             :     /*
    1464             :      * Check any WITH CHECK OPTION constraints from parent views.  We are
    1465             :      * required to do this after testing all constraints and uniqueness
    1466             :      * violations per the SQL spec, so we do it after actually updating the
    1467             :      * record in the heap and all indexes.
    1468             :      *
    1469             :      * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
    1470             :      * are looking for at this point.
    1471             :      */
    1472      118314 :     if (resultRelInfo->ri_WithCheckOptions != NIL)
    1473         272 :         ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
    1474             : 
    1475             :     /* Process RETURNING if present */
    1476      118270 :     if (resultRelInfo->ri_projectReturning)
    1477        1628 :         return ExecProcessReturning(resultRelInfo, slot, planSlot);
    1478             : 
    1479      116642 :     return NULL;
    1480             : }
    1481             : 
/*
 * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
 *
 * Try to lock tuple for update as part of speculative insertion.  If
 * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
 * (but still lock row, even though it may not satisfy estate's
 * snapshot).
 *
 * Returns true if we're done (with or without an update), or false if
 * the caller must retry the INSERT from scratch.  When an update is
 * actually performed, *returning receives the RETURNING result slot
 * from the internal ExecUpdate() call; otherwise it is left untouched.
 */
static bool
ExecOnConflictUpdate(ModifyTableState *mtstate,
                     ResultRelInfo *resultRelInfo,
                     ItemPointer conflictTid,
                     TupleTableSlot *planSlot,
                     TupleTableSlot *excludedSlot,
                     EState *estate,
                     bool canSetTag,
                     TupleTableSlot **returning)
{
    ExprContext *econtext = mtstate->ps.ps_ExprContext;
    Relation    relation = resultRelInfo->ri_RelationDesc;
    ExprState  *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
    /* slot that will hold the already-existing (conflicting) tuple */
    TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
    TM_FailureData tmfd;
    LockTupleMode lockmode;
    TM_Result   test;
    Datum       xminDatum;
    TransactionId xmin;
    bool        isnull;

    /* Determine lock mode to use */
    lockmode = ExecUpdateLockMode(estate, resultRelInfo);

    /*
     * Lock tuple for update.  Don't follow updates when tuple cannot be
     * locked without doing so.  A row locking conflict here means our
     * previous conclusion that the tuple is conclusively committed is not
     * true anymore.
     */
    test = table_tuple_lock(relation, conflictTid,
                            estate->es_snapshot,
                            existing, estate->es_output_cid,
                            lockmode, LockWaitBlock, 0,
                            &tmfd);
    switch (test)
    {
        case TM_Ok:
            /* success! */
            break;

        case TM_Invisible:

            /*
             * This can occur when a just inserted tuple is updated again in
             * the same command. E.g. because multiple rows with the same
             * conflicting key values are inserted.
             *
             * This is somewhat similar to the ExecUpdate() TM_SelfModified
             * case.  We do not want to proceed because it would lead to the
             * same row being updated a second time in some unspecified order,
             * and in contrast to plain UPDATEs there's no historical behavior
             * to break.
             *
             * It is the user's responsibility to prevent this situation from
             * occurring.  These problems are why SQL-2003 similarly specifies
             * that for SQL MERGE, an exception must be raised in the event of
             * an attempt to update the same row twice.
             */
            xminDatum = slot_getsysattr(existing,
                                        MinTransactionIdAttributeNumber,
                                        &isnull);
            Assert(!isnull);
            xmin = DatumGetTransactionId(xminDatum);

            /* only our own transaction can have inserted an invisible row */
            if (TransactionIdIsCurrentTransactionId(xmin))
                ereport(ERROR,
                        (errcode(ERRCODE_CARDINALITY_VIOLATION),
                         errmsg("ON CONFLICT DO UPDATE command cannot affect row a second time"),
                         errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));

            /* This shouldn't happen */
            elog(ERROR, "attempted to lock invisible tuple");
            break;

        case TM_SelfModified:

            /*
             * This state should never be reached. As a dirty snapshot is used
             * to find conflicting tuples, speculative insertion wouldn't have
             * seen this row to conflict with.
             */
            elog(ERROR, "unexpected self-updated tuple");
            break;

        case TM_Updated:
            if (IsolationUsesXactSnapshot())
                ereport(ERROR,
                        (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                         errmsg("could not serialize access due to concurrent update")));

            /*
             * As long as we don't support an UPDATE of INSERT ON CONFLICT for
             * a partitioned table we shouldn't reach to a case where tuple to
             * be lock is moved to another partition due to concurrent update
             * of the partition key.
             */
            Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));

            /*
             * Tell caller to try again from the very start.
             *
             * It does not make sense to use the usual EvalPlanQual() style
             * loop here, as the new version of the row might not conflict
             * anymore, or the conflicting tuple has actually been deleted.
             */
            ExecClearTuple(existing);
            return false;

        case TM_Deleted:
            if (IsolationUsesXactSnapshot())
                ereport(ERROR,
                        (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
                         errmsg("could not serialize access due to concurrent delete")));

            /* see TM_Updated case */
            Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
            ExecClearTuple(existing);
            return false;

        default:
            elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
    }

    /* Success, the tuple is locked. */

    /*
     * Verify that the tuple is visible to our MVCC snapshot if the current
     * isolation level mandates that.
     *
     * It's not sufficient to rely on the check within ExecUpdate() as e.g.
     * CONFLICT ... WHERE clause may prevent us from reaching that.
     *
     * This means we only ever continue when a new command in the current
     * transaction could see the row, even though in READ COMMITTED mode the
     * tuple will not be visible according to the current statement's
     * snapshot.  This is in line with the way UPDATE deals with newer tuple
     * versions.
     */
    ExecCheckTupleVisible(estate, relation, existing);

    /*
     * Make tuple and any needed join variables available to ExecQual and
     * ExecProject.  The EXCLUDED tuple is installed in ecxt_innertuple, while
     * the target's existing tuple is installed in the scantuple.  EXCLUDED
     * has been made to reference INNER_VAR in setrefs.c, but there is no
     * other redirection.
     */
    econtext->ecxt_scantuple = existing;
    econtext->ecxt_innertuple = excludedSlot;
    econtext->ecxt_outertuple = NULL;

    /* Evaluate the ON CONFLICT ... DO UPDATE ... WHERE clause, if any */
    if (!ExecQual(onConflictSetWhere, econtext))
    {
        ExecClearTuple(existing);   /* see return below */
        InstrCountFiltered1(&mtstate->ps, 1);
        return true;            /* done with the tuple */
    }

    if (resultRelInfo->ri_WithCheckOptions != NIL)
    {
        /*
         * Check target's existing tuple against UPDATE-applicable USING
         * security barrier quals (if any), enforced here as RLS checks/WCOs.
         *
         * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
         * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
         * but that's almost the extent of its special handling for ON
         * CONFLICT DO UPDATE.
         *
         * The rewriter will also have associated UPDATE applicable straight
         * RLS checks/WCOs for the benefit of the ExecUpdate() call that
         * follows.  INSERTs and UPDATEs naturally have mutually exclusive WCO
         * kinds, so there is no danger of spurious over-enforcement in the
         * INSERT or UPDATE path.
         */
        ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
                             existing,
                             mtstate->ps.state);
    }

    /* Project the new tuple version */
    ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);

    /*
     * Note that it is possible that the target tuple has been modified in
     * this session, after the above table_tuple_lock. We choose to not error
     * out in that case, in line with ExecUpdate's treatment of similar cases.
     * This can happen if an UPDATE is triggered from within ExecQual(),
     * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
     * wCTE in the ON CONFLICT's SET.
     */

    /* Execute UPDATE with projection */
    *returning = ExecUpdate(mtstate, conflictTid, NULL,
                            resultRelInfo->ri_onConflict->oc_ProjSlot,
                            planSlot,
                            &mtstate->mt_epqstate, mtstate->ps.state,
                            canSetTag);

    /*
     * Clear out existing tuple, as there might not be another conflict among
     * the next input rows. Don't want to hold resources till the end of the
     * query.
     */
    ExecClearTuple(existing);
    return true;
}
    1701             : 
    1702             : 
    1703             : /*
    1704             :  * Process BEFORE EACH STATEMENT triggers
    1705             :  */
    1706             : static void
    1707       72656 : fireBSTriggers(ModifyTableState *node)
    1708             : {
    1709       72656 :     ModifyTable *plan = (ModifyTable *) node->ps.plan;
    1710       72656 :     ResultRelInfo *resultRelInfo = node->resultRelInfo;
    1711             : 
    1712             :     /*
    1713             :      * If the node modifies a partitioned table, we must fire its triggers.
    1714             :      * Note that in that case, node->resultRelInfo points to the first leaf
    1715             :      * partition, not the root table.
    1716             :      */
    1717       72656 :     if (node->rootResultRelInfo != NULL)
    1718        2280 :         resultRelInfo = node->rootResultRelInfo;
    1719             : 
    1720       72656 :     switch (node->operation)
    1721             :     {
    1722             :         case CMD_INSERT:
    1723       60388 :             ExecBSInsertTriggers(node->ps.state, resultRelInfo);
    1724       60380 :             if (plan->onConflictAction == ONCONFLICT_UPDATE)
    1725         548 :                 ExecBSUpdateTriggers(node->ps.state,
    1726             :                                      resultRelInfo);
    1727       60380 :             break;
    1728             :         case CMD_UPDATE:
    1729        9504 :             ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
    1730        9504 :             break;
    1731             :         case CMD_DELETE:
    1732        2764 :             ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
    1733        2764 :             break;
    1734             :         default:
    1735           0 :             elog(ERROR, "unknown operation");
    1736             :             break;
    1737             :     }
    1738       72648 : }
    1739             : 
    1740             : /*
    1741             :  * Return the target rel ResultRelInfo.
    1742             :  *
    1743             :  * This relation is the same as :
    1744             :  * - the relation for which we will fire AFTER STATEMENT triggers.
    1745             :  * - the relation into whose tuple format all captured transition tuples must
    1746             :  *   be converted.
    1747             :  * - the root partitioned table.
    1748             :  */
    1749             : static ResultRelInfo *
    1750      217414 : getTargetResultRelInfo(ModifyTableState *node)
    1751             : {
    1752             :     /*
    1753             :      * Note that if the node modifies a partitioned table, node->resultRelInfo
    1754             :      * points to the first leaf partition, not the root table.
    1755             :      */
    1756      217414 :     if (node->rootResultRelInfo != NULL)
    1757        7132 :         return node->rootResultRelInfo;
    1758             :     else
    1759      210282 :         return node->resultRelInfo;
    1760             : }
    1761             : 
    1762             : /*
    1763             :  * Process AFTER EACH STATEMENT triggers
    1764             :  */
    1765             : static void
    1766       70928 : fireASTriggers(ModifyTableState *node)
    1767             : {
    1768       70928 :     ModifyTable *plan = (ModifyTable *) node->ps.plan;
    1769       70928 :     ResultRelInfo *resultRelInfo = getTargetResultRelInfo(node);
    1770             : 
    1771       70928 :     switch (node->operation)
    1772             :     {
    1773             :         case CMD_INSERT:
    1774       59044 :             if (plan->onConflictAction == ONCONFLICT_UPDATE)
    1775         480 :                 ExecASUpdateTriggers(node->ps.state,
    1776             :                                      resultRelInfo,
    1777         480 :                                      node->mt_oc_transition_capture);
    1778       59044 :             ExecASInsertTriggers(node->ps.state, resultRelInfo,
    1779       59044 :                                  node->mt_transition_capture);
    1780       59044 :             break;
    1781             :         case CMD_UPDATE:
    1782        9216 :             ExecASUpdateTriggers(node->ps.state, resultRelInfo,
    1783        9216 :                                  node->mt_transition_capture);
    1784        9216 :             break;
    1785             :         case CMD_DELETE:
    1786        2668 :             ExecASDeleteTriggers(node->ps.state, resultRelInfo,
    1787        2668 :                                  node->mt_transition_capture);
    1788        2668 :             break;
    1789             :         default:
    1790           0 :             elog(ERROR, "unknown operation");
    1791             :             break;
    1792             :     }
    1793       70928 : }
    1794             : 
/*
 * Set up the state needed for collecting transition tuples for AFTER
 * triggers.
 *
 * Fills mtstate->mt_transition_capture (and, for INSERT ... ON CONFLICT DO
 * UPDATE, mtstate->mt_oc_transition_capture) and prepares the per-subplan
 * tuple conversion maps when any transition capture is active.
 */
static void
ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
{
    ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
    ResultRelInfo *targetRelInfo = getTargetResultRelInfo(mtstate);

    /* Check for transition tables on the directly targeted relation. */
    mtstate->mt_transition_capture =
        MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
                                   RelationGetRelid(targetRelInfo->ri_RelationDesc),
                                   mtstate->operation);
    /*
     * ON CONFLICT DO UPDATE needs a separate capture state for the UPDATE
     * path, since the statement is an INSERT but may perform UPDATEs.
     */
    if (plan->operation == CMD_INSERT &&
        plan->onConflictAction == ONCONFLICT_UPDATE)
        mtstate->mt_oc_transition_capture =
            MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
                                       RelationGetRelid(targetRelInfo->ri_RelationDesc),
                                       CMD_UPDATE);

    /*
     * If we found that we need to collect transition tuples then we may also
     * need tuple conversion maps for any children that have TupleDescs that
     * aren't compatible with the tuplestores.  (We can share these maps
     * between the regular and ON CONFLICT cases.)
     */
    if (mtstate->mt_transition_capture != NULL ||
        mtstate->mt_oc_transition_capture != NULL)
    {
        ExecSetupChildParentMapForSubplan(mtstate);

        /*
         * Install the conversion map for the first plan for UPDATE and DELETE
         * operations.  It will be advanced each time we switch to the next
         * plan.  (INSERT operations set it every time, so we need not update
         * mtstate->mt_oc_transition_capture here.)
         */
        if (mtstate->mt_transition_capture && mtstate->operation != CMD_INSERT)
            mtstate->mt_transition_capture->tcs_map =
                tupconv_map_for_subplan(mtstate, 0);
    }
}
    1839             : 
/*
 * ExecPrepareTupleRouting --- prepare for routing one tuple
 *
 * Determine the partition in which the tuple in slot is to be inserted,
 * and modify mtstate and estate to prepare for it.
 *
 * Caller must revert the estate changes after executing the insertion!
 * In mtstate, transition capture changes may also need to be reverted.
 *
 * Returns a slot holding the tuple of the partition rowtype.
 */
static TupleTableSlot *
ExecPrepareTupleRouting(ModifyTableState *mtstate,
                        EState *estate,
                        PartitionTupleRouting *proute,
                        ResultRelInfo *targetRelInfo,
                        TupleTableSlot *slot)
{
    ResultRelInfo *partrel;
    PartitionRoutingInfo *partrouteinfo;
    TupleConversionMap *map;

    /*
     * Lookup the target partition's ResultRelInfo.  If ExecFindPartition does
     * not find a valid partition for the tuple in 'slot' then an error is
     * raised.  An error may also be raised if the found partition is not a
     * valid target for INSERTs.  This is required since a partitioned table
     * UPDATE to another partition becomes a DELETE+INSERT.
     */
    partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
    partrouteinfo = partrel->ri_PartitionInfo;
    Assert(partrouteinfo != NULL);

    /*
     * Make it look like we are inserting into the partition.
     */
    estate->es_result_relation_info = partrel;

    /*
     * If we're capturing transition tuples, we might need to convert from the
     * partition rowtype to root partitioned table's rowtype.
     */
    if (mtstate->mt_transition_capture != NULL)
    {
        if (partrel->ri_TrigDesc &&
            partrel->ri_TrigDesc->trig_insert_before_row)
        {
            /*
             * If there are any BEFORE triggers on the partition, we'll have
             * to be ready to convert their result back to tuplestore format.
             */
            mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
            mtstate->mt_transition_capture->tcs_map =
                partrouteinfo->pi_PartitionToRootMap;
        }
        else
        {
            /*
             * Otherwise, just remember the original unconverted tuple, to
             * avoid a needless round trip conversion.
             */
            mtstate->mt_transition_capture->tcs_original_insert_tuple = slot;
            mtstate->mt_transition_capture->tcs_map = NULL;
        }
    }
    /* The ON CONFLICT capture state always converts via the routing map. */
    if (mtstate->mt_oc_transition_capture != NULL)
    {
        mtstate->mt_oc_transition_capture->tcs_map =
            partrouteinfo->pi_PartitionToRootMap;
    }

    /*
     * Convert the tuple, if necessary.  A NULL map means the root and
     * partition tuple descriptors already match.
     */
    map = partrouteinfo->pi_RootToPartitionMap;
    if (map != NULL)
    {
        TupleTableSlot *new_slot = partrouteinfo->pi_PartitionTupleSlot;

        slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
    }

    return slot;
}
    1924             : 
    1925             : /*
    1926             :  * Initialize the child-to-root tuple conversion map array for UPDATE subplans.
    1927             :  *
    1928             :  * This map array is required to convert the tuple from the subplan result rel
    1929             :  * to the target table descriptor. This requirement arises for two independent
    1930             :  * scenarios:
    1931             :  * 1. For update-tuple-routing.
    1932             :  * 2. For capturing tuples in transition tables.
    1933             :  */
    1934             : static void
    1935         680 : ExecSetupChildParentMapForSubplan(ModifyTableState *mtstate)
    1936             : {
    1937         680 :     ResultRelInfo *targetRelInfo = getTargetResultRelInfo(mtstate);
    1938         680 :     ResultRelInfo *resultRelInfos = mtstate->resultRelInfo;
    1939             :     TupleDesc   outdesc;
    1940         680 :     int         numResultRelInfos = mtstate->mt_nplans;
    1941             :     int         i;
    1942             : 
    1943             :     /*
    1944             :      * Build array of conversion maps from each child's TupleDesc to the one
    1945             :      * used in the target relation.  The map pointers may be NULL when no
    1946             :      * conversion is necessary, which is hopefully a common case.
    1947             :      */
    1948             : 
    1949             :     /* Get tuple descriptor of the target rel. */
    1950         680 :     outdesc = RelationGetDescr(targetRelInfo->ri_RelationDesc);
    1951             : 
    1952         680 :     mtstate->mt_per_subplan_tupconv_maps = (TupleConversionMap **)
    1953         680 :         palloc(sizeof(TupleConversionMap *) * numResultRelInfos);
    1954             : 
    1955        2000 :     for (i = 0; i < numResultRelInfos; ++i)
    1956             :     {
    1957        2640 :         mtstate->mt_per_subplan_tupconv_maps[i] =
    1958        1320 :             convert_tuples_by_name(RelationGetDescr(resultRelInfos[i].ri_RelationDesc),
    1959             :                                    outdesc);
    1960             :     }
    1961         680 : }
    1962             : 
    1963             : /*
    1964             :  * For a given subplan index, get the tuple conversion map.
    1965             :  */
    1966             : static TupleConversionMap *
    1967         594 : tupconv_map_for_subplan(ModifyTableState *mtstate, int whichplan)
    1968             : {
    1969             :     /* If nobody else set the per-subplan array of maps, do so ourselves. */
    1970         594 :     if (mtstate->mt_per_subplan_tupconv_maps == NULL)
    1971           0 :         ExecSetupChildParentMapForSubplan(mtstate);
    1972             : 
    1973             :     Assert(whichplan >= 0 && whichplan < mtstate->mt_nplans);
    1974         594 :     return mtstate->mt_per_subplan_tupconv_maps[whichplan];
    1975             : }
    1976             : 
    1977             : /* ----------------------------------------------------------------
    1978             :  *     ExecModifyTable
    1979             :  *
    1980             :  *      Perform table modifications as required, and return RETURNING results
    1981             :  *      if needed.
    1982             :  * ----------------------------------------------------------------
    1983             :  */
    1984             : static TupleTableSlot *
    1985       77476 : ExecModifyTable(PlanState *pstate)
    1986             : {
    1987       77476 :     ModifyTableState *node = castNode(ModifyTableState, pstate);
    1988       77476 :     PartitionTupleRouting *proute = node->mt_partition_tuple_routing;
    1989       77476 :     EState     *estate = node->ps.state;
    1990       77476 :     CmdType     operation = node->operation;
    1991             :     ResultRelInfo *saved_resultRelInfo;
    1992             :     ResultRelInfo *resultRelInfo;
    1993             :     PlanState  *subplanstate;
    1994             :     JunkFilter *junkfilter;
    1995             :     TupleTableSlot *slot;
    1996             :     TupleTableSlot *planSlot;
    1997             :     ItemPointer tupleid;
    1998             :     ItemPointerData tuple_ctid;
    1999             :     HeapTupleData oldtupdata;
    2000             :     HeapTuple   oldtuple;
    2001             : 
    2002       77476 :     CHECK_FOR_INTERRUPTS();
    2003             : 
    2004             :     /*
    2005             :      * This should NOT get called during EvalPlanQual; we should have passed a
    2006             :      * subplan tree to EvalPlanQual, instead.  Use a runtime test not just
    2007             :      * Assert because this condition is easy to miss in testing.  (Note:
    2008             :      * although ModifyTable should not get executed within an EvalPlanQual
    2009             :      * operation, we do have to allow it to be initialized and shut down in
    2010             :      * case it is within a CTE subplan.  Hence this test must be here, not in
    2011             :      * ExecInitModifyTable.)
    2012             :      */
    2013       77476 :     if (estate->es_epq_active != NULL)
    2014           0 :         elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
    2015             : 
    2016             :     /*
    2017             :      * If we've already completed processing, don't try to do more.  We need
    2018             :      * this test because ExecPostprocessPlan might call us an extra time, and
    2019             :      * our subplan's nodes aren't necessarily robust against being called
    2020             :      * extra times.
    2021             :      */
    2022       77476 :     if (node->mt_done)
    2023         484 :         return NULL;
    2024             : 
    2025             :     /*
    2026             :      * On first call, fire BEFORE STATEMENT triggers before proceeding.
    2027             :      */
    2028       76992 :     if (node->fireBSTriggers)
    2029             :     {
    2030       72656 :         fireBSTriggers(node);
    2031       72648 :         node->fireBSTriggers = false;
    2032             :     }
    2033             : 
    2034             :     /* Preload local variables */
    2035       76984 :     resultRelInfo = node->resultRelInfo + node->mt_whichplan;
    2036       76984 :     subplanstate = node->mt_plans[node->mt_whichplan];
    2037       76984 :     junkfilter = resultRelInfo->ri_junkFilter;
    2038             : 
    2039             :     /*
    2040             :      * es_result_relation_info must point to the currently active result
    2041             :      * relation while we are within this ModifyTable node.  Even though
    2042             :      * ModifyTable nodes can't be nested statically, they can be nested
    2043             :      * dynamically (since our subplan could include a reference to a modifying
    2044             :      * CTE).  So we have to save and restore the caller's value.
    2045             :      */
    2046       76984 :     saved_resultRelInfo = estate->es_result_relation_info;
    2047             : 
    2048       76984 :     estate->es_result_relation_info = resultRelInfo;
    2049             : 
    2050             :     /*
    2051             :      * Fetch rows from subplan(s), and execute the required table modification
    2052             :      * for each row.
    2053             :      */
    2054             :     for (;;)
    2055             :     {
    2056             :         /*
    2057             :          * Reset the per-output-tuple exprcontext.  This is needed because
    2058             :          * triggers expect to use that context as workspace.  It's a bit ugly
    2059             :          * to do this below the top level of the plan, however.  We might need
    2060             :          * to rethink this later.
    2061             :          */
    2062    24704576 :         ResetPerTupleExprContext(estate);
    2063             : 
    2064             :         /*
    2065             :          * Reset per-tuple memory context used for processing on conflict and
    2066             :          * returning clauses, to free any expression evaluation storage
    2067             :          * allocated in the previous cycle.
    2068             :          */
    2069    12390780 :         if (pstate->ps_ExprContext)
    2070       15222 :             ResetExprContext(pstate->ps_ExprContext);
    2071             : 
    2072    12390780 :         planSlot = ExecProcNode(subplanstate);
    2073             : 
    2074    12390594 :         if (TupIsNull(planSlot))
    2075             :         {
    2076             :             /* advance to next subplan if any */
    2077       71838 :             node->mt_whichplan++;
    2078       71838 :             if (node->mt_whichplan < node->mt_nplans)
    2079             :             {
    2080         910 :                 resultRelInfo++;
    2081         910 :                 subplanstate = node->mt_plans[node->mt_whichplan];
    2082         910 :                 junkfilter = resultRelInfo->ri_junkFilter;
    2083         910 :                 estate->es_result_relation_info = resultRelInfo;
    2084         910 :                 EvalPlanQualSetPlan(&node->mt_epqstate, subplanstate->plan,
    2085         910 :                                     node->mt_arowmarks[node->mt_whichplan]);
    2086             :                 /* Prepare to convert transition tuples from this child. */
    2087         910 :                 if (node->mt_transition_capture != NULL)
    2088             :                 {
    2089         264 :                     node->mt_transition_capture->tcs_map =
    2090         132 :                         tupconv_map_for_subplan(node, node->mt_whichplan);
    2091             :                 }
    2092         910 :                 if (node->mt_oc_transition_capture != NULL)
    2093             :                 {
    2094           0 :                     node->mt_oc_transition_capture->tcs_map =
    2095           0 :                         tupconv_map_for_subplan(node, node->mt_whichplan);
    2096             :                 }
    2097         910 :                 continue;
    2098             :             }
    2099             :             else
    2100       70928 :                 break;
    2101             :         }
    2102             : 
    2103             :         /*
    2104             :          * Ensure input tuple is the right format for the target relation.
    2105             :          */
    2106    12318756 :         if (node->mt_scans[node->mt_whichplan]->tts_ops != planSlot->tts_ops)
    2107             :         {
    2108    11756044 :             ExecCopySlot(node->mt_scans[node->mt_whichplan], planSlot);
    2109    11756044 :             planSlot = node->mt_scans[node->mt_whichplan];
    2110             :         }
    2111             : 
    2112             :         /*
    2113             :          * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
    2114             :          * here is compute the RETURNING expressions.
    2115             :          */
    2116    12318756 :         if (resultRelInfo->ri_usesFdwDirectModify)
    2117             :         {
    2118             :             Assert(resultRelInfo->ri_projectReturning);
    2119             : 
    2120             :             /*
    2121             :              * A scan slot containing the data that was actually inserted,
    2122             :              * updated or deleted has already been made available to
    2123             :              * ExecProcessReturning by IterateDirectModify, so no need to
    2124             :              * provide it here.
    2125             :              */
    2126         692 :             slot = ExecProcessReturning(resultRelInfo, NULL, planSlot);
    2127             : 
    2128         692 :             estate->es_result_relation_info = saved_resultRelInfo;
    2129         692 :             return slot;
    2130             :         }
    2131             : 
    2132    12318064 :         EvalPlanQualSetSlot(&node->mt_epqstate, planSlot);
    2133    12318064 :         slot = planSlot;
    2134             : 
    2135    12318064 :         tupleid = NULL;
    2136    12318064 :         oldtuple = NULL;
    2137    12318064 :         if (junkfilter != NULL)
    2138             :         {
    2139             :             /*
    2140             :              * extract the 'ctid' or 'wholerow' junk attribute.
    2141             :              */
    2142      945186 :             if (operation == CMD_UPDATE || operation == CMD_DELETE)
    2143             :             {
    2144             :                 char        relkind;
    2145             :                 Datum       datum;
    2146             :                 bool        isNull;
    2147             : 
    2148      945186 :                 relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
    2149      945186 :                 if (relkind == RELKIND_RELATION || relkind == RELKIND_MATVIEW)
    2150             :                 {
    2151      944968 :                     datum = ExecGetJunkAttribute(slot,
    2152      944968 :                                                  junkfilter->jf_junkAttNo,
    2153             :                                                  &isNull);
    2154             :                     /* shouldn't ever get a null result... */
    2155      944968 :                     if (isNull)
    2156           0 :                         elog(ERROR, "ctid is NULL");
    2157             : 
    2158      944968 :                     tupleid = (ItemPointer) DatumGetPointer(datum);
    2159      944968 :                     tuple_ctid = *tupleid;  /* be sure we don't free ctid!! */
    2160      944968 :                     tupleid = &tuple_ctid;
    2161             :                 }
    2162             : 
    2163             :                 /*
    2164             :                  * Use the wholerow attribute, when available, to reconstruct
    2165             :                  * the old relation tuple.
    2166             :                  *
    2167             :                  * Foreign table updates have a wholerow attribute when the
    2168             :                  * relation has a row-level trigger.  Note that the wholerow
    2169             :                  * attribute does not carry system columns.  Foreign table
    2170             :                  * triggers miss seeing those, except that we know enough here
    2171             :                  * to set t_tableOid.  Quite separately from this, the FDW may
    2172             :                  * fetch its own junk attrs to identify the row.
    2173             :                  *
    2174             :                  * Other relevant relkinds, currently limited to views, always
    2175             :                  * have a wholerow attribute.
    2176             :                  */
    2177         218 :                 else if (AttributeNumberIsValid(junkfilter->jf_junkAttNo))
    2178             :                 {
    2179         170 :                     datum = ExecGetJunkAttribute(slot,
    2180         170 :                                                  junkfilter->jf_junkAttNo,
    2181             :                                                  &isNull);
    2182             :                     /* shouldn't ever get a null result... */
    2183         170 :                     if (isNull)
    2184           0 :                         elog(ERROR, "wholerow is NULL");
    2185             : 
    2186         170 :                     oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
    2187         170 :                     oldtupdata.t_len =
    2188         170 :                         HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
    2189         170 :                     ItemPointerSetInvalid(&(oldtupdata.t_self));
    2190             :                     /* Historically, view triggers see invalid t_tableOid. */
    2191         170 :                     oldtupdata.t_tableOid =
    2192         224 :                         (relkind == RELKIND_VIEW) ? InvalidOid :
    2193          54 :                         RelationGetRelid(resultRelInfo->ri_RelationDesc);
    2194             : 
    2195         170 :                     oldtuple = &oldtupdata;
    2196             :                 }
    2197             :                 else
    2198             :                     Assert(relkind == RELKIND_FOREIGN_TABLE);
    2199             :             }
    2200             : 
    2201             :             /*
    2202             :              * apply the junkfilter if needed.
    2203             :              */
    2204      945186 :             if (operation != CMD_DELETE)
    2205      114082 :                 slot = ExecFilterJunk(junkfilter, slot);
    2206             :         }
    2207             : 
    2208    12318064 :         switch (operation)
    2209             :         {
    2210             :             case CMD_INSERT:
    2211             :                 /* Prepare for tuple routing if needed. */
    2212    11372878 :                 if (proute)
    2213      490446 :                     slot = ExecPrepareTupleRouting(node, estate, proute,
    2214             :                                                    resultRelInfo, slot);
    2215    11372792 :                 slot = ExecInsert(node, slot, planSlot,
    2216    11372792 :                                   estate, node->canSetTag);
    2217             :                 /* Revert ExecPrepareTupleRouting's state change. */
    2218    11371630 :                 if (proute)
    2219      490270 :                     estate->es_result_relation_info = resultRelInfo;
    2220    11371630 :                 break;
    2221             :             case CMD_UPDATE:
    2222      114082 :                 slot = ExecUpdate(node, tupleid, oldtuple, slot, planSlot,
    2223      114082 :                                   &node->mt_epqstate, estate, node->canSetTag);
    2224      113874 :                 break;
    2225             :             case CMD_DELETE:
    2226      831104 :                 slot = ExecDelete(node, tupleid, oldtuple, planSlot,
    2227             :                                   &node->mt_epqstate, estate,
    2228      831104 :                                   true, node->canSetTag,
    2229             :                                   false /* changingPart */ , NULL, NULL);
    2230      831036 :                 break;
    2231             :             default:
    2232           0 :                 elog(ERROR, "unknown operation");
    2233             :                 break;
    2234             :         }
    2235             : 
    2236             :         /*
    2237             :          * If we got a RETURNING result, return it to caller.  We'll continue
    2238             :          * the work on next call.
    2239             :          */
    2240    12316540 :         if (slot)
    2241             :         {
    2242        3654 :             estate->es_result_relation_info = saved_resultRelInfo;
    2243        3654 :             return slot;
    2244             :         }
    2245             :     }
    2246             : 
    2247             :     /* Restore es_result_relation_info before exiting */
    2248       70928 :     estate->es_result_relation_info = saved_resultRelInfo;
    2249             : 
    2250             :     /*
    2251             :      * We're done, but fire AFTER STATEMENT triggers before exiting.
    2252             :      */
    2253       70928 :     fireASTriggers(node);
    2254             : 
    2255       70928 :     node->mt_done = true;
    2256             : 
    2257       70928 :     return NULL;
    2258             : }
    2259             : 
    2260             : /* ----------------------------------------------------------------
    2261             :  *      ExecInitModifyTable
    2262             :  * ----------------------------------------------------------------
    2263             :  */
    2264             : ModifyTableState *
    2265       73136 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
    2266             : {
    2267             :     ModifyTableState *mtstate;
    2268       73136 :     CmdType     operation = node->operation;
    2269       73136 :     int         nplans = list_length(node->plans);
    2270             :     ResultRelInfo *saved_resultRelInfo;
    2271             :     ResultRelInfo *resultRelInfo;
    2272             :     Plan       *subplan;
    2273             :     ListCell   *l;
    2274             :     int         i;
    2275             :     Relation    rel;
    2276       73136 :     bool        update_tuple_routing_needed = node->partColsUpdated;
    2277             : 
    2278             :     /* check for unsupported flags */
    2279             :     Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
    2280             : 
    2281             :     /*
    2282             :      * create state structure
    2283             :      */
    2284       73136 :     mtstate = makeNode(ModifyTableState);
    2285       73136 :     mtstate->ps.plan = (Plan *) node;
    2286       73136 :     mtstate->ps.state = estate;
    2287       73136 :     mtstate->ps.ExecProcNode = ExecModifyTable;
    2288             : 
    2289       73136 :     mtstate->operation = operation;
    2290       73136 :     mtstate->canSetTag = node->canSetTag;
    2291       73136 :     mtstate->mt_done = false;
    2292             : 
    2293       73136 :     mtstate->mt_plans = (PlanState **) palloc0(sizeof(PlanState *) * nplans);
    2294       73136 :     mtstate->resultRelInfo = estate->es_result_relations + node->resultRelIndex;
    2295       73136 :     mtstate->mt_scans = (TupleTableSlot **) palloc0(sizeof(TupleTableSlot *) * nplans);
    2296             : 
    2297             :     /* If modifying a partitioned table, initialize the root table info */
    2298       73136 :     if (node->rootResultRelIndex >= 0)
    2299        4700 :         mtstate->rootResultRelInfo = estate->es_root_result_relations +
    2300        2350 :             node->rootResultRelIndex;
    2301             : 
    2302       73136 :     mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans);
    2303       73136 :     mtstate->mt_nplans = nplans;
    2304             : 
    2305             :     /* set up epqstate with dummy subplan data for the moment */
    2306       73136 :     EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam);
    2307       73136 :     mtstate->fireBSTriggers = true;
    2308             : 
    2309             :     /*
    2310             :      * call ExecInitNode on each of the plans to be executed and save the
    2311             :      * results into the array "mt_plans".  This is also a convenient place to
    2312             :      * verify that the proposed target relations are valid and open their
    2313             :      * indexes for insertion of new index entries.  Note we *must* set
    2314             :      * estate->es_result_relation_info correctly while we initialize each
    2315             :      * sub-plan; external modules such as FDWs may depend on that (see
    2316             :      * contrib/postgres_fdw/postgres_fdw.c: postgresBeginDirectModify() as one
    2317             :      * example).
    2318             :      */
    2319       73136 :     saved_resultRelInfo = estate->es_result_relation_info;
    2320             : 
    2321       73136 :     resultRelInfo = mtstate->resultRelInfo;
    2322       73136 :     i = 0;
    2323      147444 :     foreach(l, node->plans)
    2324             :     {
    2325       74318 :         subplan = (Plan *) lfirst(l);
    2326             : 
    2327             :         /* Initialize the usesFdwDirectModify flag */
    2328       74318 :         resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i,
    2329       74318 :                                                               node->fdwDirectModifyPlans);
    2330             : 
    2331             :         /*
    2332             :          * Verify result relation is a valid target for the current operation
    2333             :          */
    2334       74318 :         CheckValidResultRel(resultRelInfo, operation);
    2335             : 
    2336             :         /*
    2337             :          * If there are indices on the result relation, open them and save
    2338             :          * descriptors in the result relation info, so that we can add new
    2339             :          * index entries for the tuples we add/update.  We need not do this
    2340             :          * for a DELETE, however, since deletion doesn't affect indexes. Also,
    2341             :          * inside an EvalPlanQual operation, the indexes might be open
    2342             :          * already, since we share the resultrel state with the original
    2343             :          * query.
    2344             :          */
    2345       74308 :         if (resultRelInfo->ri_RelationDesc->rd_rel->relhasindex &&
    2346       29956 :             operation != CMD_DELETE &&
    2347       29956 :             resultRelInfo->ri_IndexRelationDescs == NULL)
    2348       29942 :             ExecOpenIndices(resultRelInfo,
    2349       29942 :                             node->onConflictAction != ONCONFLICT_NONE);
    2350             : 
    2351             :         /*
    2352             :          * If this is an UPDATE and a BEFORE UPDATE trigger is present, the
    2353             :          * trigger itself might modify the partition-key values. So arrange
    2354             :          * for tuple routing.
    2355             :          */
    2356       81590 :         if (resultRelInfo->ri_TrigDesc &&
    2357        9444 :             resultRelInfo->ri_TrigDesc->trig_update_before_row &&
    2358             :             operation == CMD_UPDATE)
    2359         870 :             update_tuple_routing_needed = true;
    2360             : 
    2361             :         /* Now init the plan for this result rel */
    2362       74308 :         estate->es_result_relation_info = resultRelInfo;
    2363       74308 :         mtstate->mt_plans[i] = ExecInitNode(subplan, estate, eflags);
    2364      148616 :         mtstate->mt_scans[i] =
    2365       74308 :             ExecInitExtraTupleSlot(mtstate->ps.state, ExecGetResultType(mtstate->mt_plans[i]),
    2366             :                                    table_slot_callbacks(resultRelInfo->ri_RelationDesc));
    2367             : 
    2368             :         /* Also let FDWs init themselves for foreign-table result rels */
    2369      148450 :         if (!resultRelInfo->ri_usesFdwDirectModify &&
    2370       74330 :             resultRelInfo->ri_FdwRoutine != NULL &&
    2371         188 :             resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
    2372             :         {
    2373         188 :             List       *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
    2374             : 
    2375         188 :             resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
    2376             :                                                              resultRelInfo,
    2377             :                                                              fdw_private,
    2378             :                                                              i,
    2379             :                                                              eflags);
    2380             :         }
    2381             : 
    2382       74308 :         resultRelInfo++;
    2383       74308 :         i++;
    2384             :     }
    2385             : 
    2386       73126 :     estate->es_result_relation_info = saved_resultRelInfo;
    2387             : 
    2388             :     /* Get the target relation */
    2389       73126 :     rel = (getTargetResultRelInfo(mtstate))->ri_RelationDesc;
    2390             : 
    2391             :     /*
    2392             :      * If it's not a partitioned table after all, UPDATE tuple routing should
    2393             :      * not be attempted.
    2394             :      */
    2395       73126 :     if (rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
    2396       70776 :         update_tuple_routing_needed = false;
    2397             : 
    2398             :     /*
    2399             :      * Build state for tuple routing if it's an INSERT or if it's an UPDATE of
    2400             :      * partition key.
    2401             :      */
    2402       73126 :     if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
    2403         860 :         (operation == CMD_INSERT || update_tuple_routing_needed))
    2404        1888 :         mtstate->mt_partition_tuple_routing =
    2405        1888 :             ExecSetupPartitionTupleRouting(estate, mtstate, rel);
    2406             : 
    2407             :     /*
    2408             :      * Build state for collecting transition tuples.  This requires having a
    2409             :      * valid trigger query context, so skip it in explain-only mode.
    2410             :      */
    2411       73126 :     if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
    2412       72680 :         ExecSetupTransitionCaptureState(mtstate, estate);
    2413             : 
    2414             :     /*
    2415             :      * Construct mapping from each of the per-subplan partition attnos to the
    2416             :      * root attno.  This is required when during update row movement the tuple
    2417             :      * descriptor of a source partition does not match the root partitioned
    2418             :      * table descriptor.  In such a case we need to convert tuples to the root
    2419             :      * tuple descriptor, because the search for destination partition starts
    2420             :      * from the root.  We'll also need a slot to store these converted tuples.
    2421             :      * We can skip this setup if it's not a partition key update.
    2422             :      */
    2423       73126 :     if (update_tuple_routing_needed)
    2424             :     {
    2425         398 :         ExecSetupChildParentMapForSubplan(mtstate);
    2426         398 :         mtstate->mt_root_tuple_slot = table_slot_create(rel, NULL);
    2427             :     }
    2428             : 
    2429             :     /*
    2430             :      * Initialize any WITH CHECK OPTION constraints if needed.
    2431             :      */
    2432       73126 :     resultRelInfo = mtstate->resultRelInfo;
    2433       73126 :     i = 0;
    2434       73906 :     foreach(l, node->withCheckOptionLists)
    2435             :     {
    2436         780 :         List       *wcoList = (List *) lfirst(l);
    2437         780 :         List       *wcoExprs = NIL;
    2438             :         ListCell   *ll;
    2439             : 
    2440        1926 :         foreach(ll, wcoList)
    2441             :         {
    2442        1146 :             WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
    2443        1146 :             ExprState  *wcoExpr = ExecInitQual((List *) wco->qual,
    2444             :                                                &mtstate->ps);
    2445             : 
    2446        1146 :             wcoExprs = lappend(wcoExprs, wcoExpr);
    2447             :         }
    2448             : 
    2449         780 :         resultRelInfo->ri_WithCheckOptions = wcoList;
    2450         780 :         resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
    2451         780 :         resultRelInfo++;
    2452         780 :         i++;
    2453             :     }
    2454             : 
    2455             :     /*
    2456             :      * Initialize RETURNING projections if needed.
    2457             :      */
    2458       73126 :     if (node->returningLists)
    2459             :     {
    2460             :         TupleTableSlot *slot;
    2461             :         ExprContext *econtext;
    2462             : 
    2463             :         /*
    2464             :          * Initialize result tuple slot and assign its rowtype using the first
    2465             :          * RETURNING list.  We assume the rest will look the same.
    2466             :          */
    2467        1926 :         mtstate->ps.plan->targetlist = (List *) linitial(node->returningLists);
    2468             : 
    2469             :         /* Set up a slot for the output of the RETURNING projection(s) */
    2470        1926 :         ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
    2471        1926 :         slot = mtstate->ps.ps_ResultTupleSlot;
    2472             : 
    2473             :         /* Need an econtext too */
    2474        1926 :         if (mtstate->ps.ps_ExprContext == NULL)
    2475        1926 :             ExecAssignExprContext(estate, &mtstate->ps);
    2476        1926 :         econtext = mtstate->ps.ps_ExprContext;
    2477             : 
    2478             :         /*
    2479             :          * Build a projection for each result rel.
    2480             :          */
    2481        1926 :         resultRelInfo = mtstate->resultRelInfo;
    2482        4038 :         foreach(l, node->returningLists)
    2483             :         {
    2484        2112 :             List       *rlist = (List *) lfirst(l);
    2485             : 
    2486        2112 :             resultRelInfo->ri_returningList = rlist;
    2487        2112 :             resultRelInfo->ri_projectReturning =
    2488        2112 :                 ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
    2489        2112 :                                         resultRelInfo->ri_RelationDesc->rd_att);
    2490        2112 :             resultRelInfo++;
    2491             :         }
    2492             :     }
    2493             :     else
    2494             :     {
    2495             :         /*
    2496             :          * We still must construct a dummy result tuple type, because InitPlan
    2497             :          * expects one (maybe should change that?).
    2498             :          */
    2499       71200 :         mtstate->ps.plan->targetlist = NIL;
    2500       71200 :         ExecInitResultTypeTL(&mtstate->ps);
    2501             : 
    2502       71200 :         mtstate->ps.ps_ExprContext = NULL;
    2503             :     }
    2504             : 
    2505             :     /* Set the list of arbiter indexes if needed for ON CONFLICT */
    2506       73126 :     resultRelInfo = mtstate->resultRelInfo;
    2507       73126 :     if (node->onConflictAction != ONCONFLICT_NONE)
    2508         816 :         resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
    2509             : 
    2510             :     /*
    2511             :      * If needed, Initialize target list, projection and qual for ON CONFLICT
    2512             :      * DO UPDATE.
    2513             :      */
    2514       73126 :     if (node->onConflictAction == ONCONFLICT_UPDATE)
    2515             :     {
    2516             :         ExprContext *econtext;
    2517             :         TupleDesc   relationDesc;
    2518             :         TupleDesc   tupDesc;
    2519             : 
    2520             :         /* insert may only have one plan, inheritance is not expanded */
    2521             :         Assert(nplans == 1);
    2522             : 
    2523             :         /* already exists if created by RETURNING processing above */
    2524         596 :         if (mtstate->ps.ps_ExprContext == NULL)
    2525         424 :             ExecAssignExprContext(estate, &mtstate->ps);
    2526             : 
    2527         596 :         econtext = mtstate->ps.ps_ExprContext;
    2528         596 :         relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
    2529             : 
    2530             :         /* carried forward solely for the benefit of explain */
    2531         596 :         mtstate->mt_excludedtlist = node->exclRelTlist;
    2532             : 
    2533             :         /* create state for DO UPDATE SET operation */
    2534         596 :         resultRelInfo->ri_onConflict = makeNode(OnConflictSetState);
    2535             : 
    2536             :         /* initialize slot for the existing tuple */
    2537        1192 :         resultRelInfo->ri_onConflict->oc_Existing =
    2538         596 :             table_slot_create(resultRelInfo->ri_RelationDesc,
    2539         596 :                               &mtstate->ps.state->es_tupleTable);
    2540             : 
    2541             :         /*
    2542             :          * Create the tuple slot for the UPDATE SET projection. We want a slot
    2543             :          * of the table's type here, because the slot will be used to insert
    2544             :          * into the table, and for RETURNING processing - which may access
    2545             :          * system attributes.
    2546             :          */
    2547         596 :         tupDesc = ExecTypeFromTL((List *) node->onConflictSet);
    2548        1192 :         resultRelInfo->ri_onConflict->oc_ProjSlot =
    2549         596 :             ExecInitExtraTupleSlot(mtstate->ps.state, tupDesc,
    2550             :                                    table_slot_callbacks(resultRelInfo->ri_RelationDesc));
    2551             : 
    2552             :         /* build UPDATE SET projection state */
    2553        1192 :         resultRelInfo->ri_onConflict->oc_ProjInfo =
    2554        1192 :             ExecBuildProjectionInfo(node->onConflictSet, econtext,
    2555         596 :                                     resultRelInfo->ri_onConflict->oc_ProjSlot,
    2556             :                                     &mtstate->ps,
    2557             :                                     relationDesc);
    2558             : 
    2559             :         /* initialize state to evaluate the WHERE clause, if any */
    2560         596 :         if (node->onConflictWhere)
    2561             :         {
    2562             :             ExprState  *qualexpr;
    2563             : 
    2564         122 :             qualexpr = ExecInitQual((List *) node->onConflictWhere,
    2565             :                                     &mtstate->ps);
    2566         122 :             resultRelInfo->ri_onConflict->oc_WhereClause = qualexpr;
    2567             :         }
    2568             :     }
    2569             : 
    2570             :     /*
    2571             :      * If we have any secondary relations in an UPDATE or DELETE, they need to
    2572             :      * be treated like non-locked relations in SELECT FOR UPDATE, ie, the
    2573             :      * EvalPlanQual mechanism needs to be told about them.  Locate the
    2574             :      * relevant ExecRowMarks.
    2575             :      */
    2576       74112 :     foreach(l, node->rowMarks)
    2577             :     {
    2578         986 :         PlanRowMark *rc = lfirst_node(PlanRowMark, l);
    2579             :         ExecRowMark *erm;
    2580             : 
    2581             :         /* ignore "parent" rowmarks; they are irrelevant at runtime */
    2582         986 :         if (rc->isParent)
    2583          64 :             continue;
    2584             : 
    2585             :         /* find ExecRowMark (same for all subplans) */
    2586         922 :         erm = ExecFindRowMark(estate, rc->rti, false);
    2587             : 
    2588             :         /* build ExecAuxRowMark for each subplan */
    2589        2172 :         for (i = 0; i < nplans; i++)
    2590             :         {
    2591             :             ExecAuxRowMark *aerm;
    2592             : 
    2593        1250 :             subplan = mtstate->mt_plans[i]->plan;
    2594        1250 :             aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
    2595        1250 :             mtstate->mt_arowmarks[i] = lappend(mtstate->mt_arowmarks[i], aerm);
    2596             :         }
    2597             :     }
    2598             : 
    2599             :     /* select first subplan */
    2600       73126 :     mtstate->mt_whichplan = 0;
    2601       73126 :     subplan = (Plan *) linitial(node->plans);
    2602       73126 :     EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan,
    2603       73126 :                         mtstate->mt_arowmarks[0]);
    2604             : 
    2605             :     /*
    2606             :      * Initialize the junk filter(s) if needed.  INSERT queries need a filter
    2607             :      * if there are any junk attrs in the tlist.  UPDATE and DELETE always
    2608             :      * need a filter, since there's always at least one junk attribute present
    2609             :      * --- no need to look first.  Typically, this will be a 'ctid' or
    2610             :      * 'wholerow' attribute, but in the case of a foreign data wrapper it
    2611             :      * might be a set of junk attributes sufficient to identify the remote
    2612             :      * row.
    2613             :      *
    2614             :      * If there are multiple result relations, each one needs its own junk
    2615             :      * filter.  Note multiple rels are only possible for UPDATE/DELETE, so we
    2616             :      * can't be fooled by some needing a filter and some not.
    2617             :      *
    2618             :      * This section of code is also a convenient place to verify that the
    2619             :      * output of an INSERT or UPDATE matches the target table(s).
    2620             :      */
    2621             :     {
    2622       73126 :         bool        junk_filter_needed = false;
    2623             : 
    2624       73126 :         switch (operation)
    2625             :         {
    2626             :             case CMD_INSERT:
    2627      281620 :                 foreach(l, subplan->targetlist)
    2628             :                 {
    2629      221108 :                     TargetEntry *tle = (TargetEntry *) lfirst(l);
    2630             : 
    2631      221108 :                     if (tle->resjunk)
    2632             :                     {
    2633           0 :                         junk_filter_needed = true;
    2634           0 :                         break;
    2635             :                     }
    2636             :                 }
    2637       60512 :                 break;
    2638             :             case CMD_UPDATE:
    2639             :             case CMD_DELETE:
    2640       12614 :                 junk_filter_needed = true;
    2641       12614 :                 break;
    2642             :             default:
    2643           0 :                 elog(ERROR, "unknown operation");
    2644             :                 break;
    2645             :         }
    2646             : 
    2647       73126 :         if (junk_filter_needed)
    2648             :         {
    2649       12614 :             resultRelInfo = mtstate->resultRelInfo;
    2650       26406 :             for (i = 0; i < nplans; i++)
    2651             :             {
    2652             :                 JunkFilter *j;
    2653             :                 TupleTableSlot *junkresslot;
    2654             : 
    2655       13792 :                 subplan = mtstate->mt_plans[i]->plan;
    2656       13792 :                 if (operation == CMD_INSERT || operation == CMD_UPDATE)
    2657       10678 :                     ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc,
    2658             :                                         subplan->targetlist);
    2659             : 
    2660       13792 :                 junkresslot =
    2661       13792 :                     ExecInitExtraTupleSlot(estate, NULL,
    2662             :                                            table_slot_callbacks(resultRelInfo->ri_RelationDesc));
    2663       13792 :                 j = ExecInitJunkFilter(subplan->targetlist,
    2664             :                                        junkresslot);
    2665             : 
    2666       13792 :                 if (operation == CMD_UPDATE || operation == CMD_DELETE)
    2667             :                 {
    2668             :                     /* For UPDATE/DELETE, find the appropriate junk attr now */
    2669             :                     char        relkind;
    2670             : 
    2671       13792 :                     relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
    2672       13792 :                     if (relkind == RELKIND_RELATION ||
    2673         404 :                         relkind == RELKIND_MATVIEW ||
    2674             :                         relkind == RELKIND_PARTITIONED_TABLE)
    2675             :                     {
    2676       13400 :                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
    2677       26800 :                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
    2678           0 :                             elog(ERROR, "could not find junk ctid column");
    2679             :                     }
    2680         392 :                     else if (relkind == RELKIND_FOREIGN_TABLE)
    2681             :                     {
    2682             :                         /*
    2683             :                          * When there is a row-level trigger, there should be
    2684             :                          * a wholerow attribute.
    2685             :                          */
    2686         260 :                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow");
    2687             :                     }
    2688             :                     else
    2689             :                     {
    2690         132 :                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow");
    2691         132 :                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
    2692           0 :                             elog(ERROR, "could not find junk wholerow column");
    2693             :                     }
    2694             :                 }
    2695             : 
    2696       13792 :                 resultRelInfo->ri_junkFilter = j;
    2697       13792 :                 resultRelInfo++;
    2698             :             }
    2699             :         }
    2700             :         else
    2701             :         {
    2702       60512 :             if (operation == CMD_INSERT)
    2703       60512 :                 ExecCheckPlanOutput(mtstate->resultRelInfo->ri_RelationDesc,
    2704             :                                     subplan->targetlist);
    2705             :         }
    2706             :     }
    2707             : 
    2708             :     /*
    2709             :      * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
    2710             :      * to estate->es_auxmodifytables so that it will be run to completion by
    2711             :      * ExecPostprocessPlan.  (It'd actually work fine to add the primary
    2712             :      * ModifyTable node too, but there's no need.)  Note the use of lcons not
    2713             :      * lappend: we need later-initialized ModifyTable nodes to be shut down
    2714             :      * before earlier ones.  This ensures that we don't throw away RETURNING
    2715             :      * rows that need to be seen by a later CTE subplan.
    2716             :      */
    2717       73126 :     if (!mtstate->canSetTag)
    2718         578 :         estate->es_auxmodifytables = lcons(mtstate,
    2719             :                                            estate->es_auxmodifytables);
    2720             : 
    2721       73126 :     return mtstate;
    2722             : }
    2723             : 
    2724             : /* ----------------------------------------------------------------
    2725             :  *      ExecEndModifyTable
    2726             :  *
    2727             :  *      Shuts down the plan.
    2728             :  *
    2729             :  *      Returns nothing of interest.
    2730             :  * ----------------------------------------------------------------
    2731             :  */
    2732             : void
    2733       70880 : ExecEndModifyTable(ModifyTableState *node)
    2734             : {
    2735             :     int         i;
    2736             : 
    2737             :     /*
    2738             :      * Allow any FDWs to shut down
    2739             :      */
    2740      142810 :     for (i = 0; i < node->mt_nplans; i++)
    2741             :     {
    2742       71930 :         ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
    2743             : 
    2744      143704 :         if (!resultRelInfo->ri_usesFdwDirectModify &&
    2745       71946 :             resultRelInfo->ri_FdwRoutine != NULL &&
    2746         172 :             resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
    2747         172 :             resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
    2748             :                                                            resultRelInfo);
    2749             :     }
    2750             : 
    2751             :     /*
    2752             :      * Close all the partitioned tables, leaf partitions, and their indices
    2753             :      * and release the slot used for tuple routing, if set.
    2754             :      */
    2755       70880 :     if (node->mt_partition_tuple_routing)
    2756             :     {
    2757        1552 :         ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
    2758             : 
    2759        1552 :         if (node->mt_root_tuple_slot)
    2760         322 :             ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
    2761             :     }
    2762             : 
    2763             :     /*
    2764             :      * Free the exprcontext
    2765             :      */
    2766       70880 :     ExecFreeExprContext(&node->ps);
    2767             : 
    2768             :     /*
    2769             :      * clean out the tuple table
    2770             :      */
    2771       70880 :     if (node->ps.ps_ResultTupleSlot)
    2772        1842 :         ExecClearTuple(node->ps.ps_ResultTupleSlot);
    2773             : 
    2774             :     /*
    2775             :      * Terminate EPQ execution if active
    2776             :      */
    2777       70880 :     EvalPlanQualEnd(&node->mt_epqstate);
    2778             : 
    2779             :     /*
    2780             :      * shut down subplans
    2781             :      */
    2782      142810 :     for (i = 0; i < node->mt_nplans; i++)
    2783       71930 :         ExecEndNode(node->mt_plans[i]);
    2784       70880 : }
    2785             : 
    2786             : void
    2787           0 : ExecReScanModifyTable(ModifyTableState *node)
    2788             : {
    2789             :     /*
    2790             :      * Currently, we don't need to support rescan on ModifyTable nodes. The
    2791             :      * semantics of that would be a bit debatable anyway.
    2792             :      */
    2793           0 :     elog(ERROR, "ExecReScanModifyTable is not implemented");
    2794             : }

Generated by: LCOV version 1.13