LCOV - code coverage report
Current view: top level - src/backend/executor - nodeModifyTable.c (source / functions) Hit Total Coverage
Test: PostgreSQL 13beta1 Lines: 748 807 92.7 %
Date: 2020-06-05 19:06:29 Functions: 19 20 95.0 %
Legend: Lines: hit not hit

          Line data    Source code
       1             : /*-------------------------------------------------------------------------
       2             :  *
       3             :  * nodeModifyTable.c
       4             :  *    routines to handle ModifyTable nodes.
       5             :  *
       6             :  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
       7             :  * Portions Copyright (c) 1994, Regents of the University of California
       8             :  *
       9             :  *
      10             :  * IDENTIFICATION
      11             :  *    src/backend/executor/nodeModifyTable.c
      12             :  *
      13             :  *-------------------------------------------------------------------------
      14             :  */
      15             : /* INTERFACE ROUTINES
      16             :  *      ExecInitModifyTable - initialize the ModifyTable node
      17             :  *      ExecModifyTable     - retrieve the next tuple from the node
      18             :  *      ExecEndModifyTable  - shut down the ModifyTable node
      19             :  *      ExecReScanModifyTable - rescan the ModifyTable node
      20             :  *
      21             :  *   NOTES
      22             :  *      Each ModifyTable node contains a list of one or more subplans,
      23             :  *      much like an Append node.  There is one subplan per result relation.
      24             :  *      The key reason for this is that in an inherited UPDATE command, each
      25             :  *      result relation could have a different schema (more or different
      26             :  *      columns) requiring a different plan tree to produce it.  In an
      27             :  *      inherited DELETE, all the subplans should produce the same output
      28             :  *      rowtype, but we might still find that different plans are appropriate
      29             :  *      for different child relations.
      30             :  *
      31             :  *      If the query specifies RETURNING, then the ModifyTable returns a
      32             :  *      RETURNING tuple after completing each row insert, update, or delete.
      33             :  *      It must be called again to continue the operation.  Without RETURNING,
      34             :  *      we just loop within the node until all the work is done, then
      35             :  *      return NULL.  This avoids useless call/return overhead.
      36             :  */
      37             : 
      38             : #include "postgres.h"
      39             : 
      40             : #include "access/heapam.h"
      41             : #include "access/htup_details.h"
      42             : #include "access/tableam.h"
      43             : #include "access/xact.h"
      44             : #include "catalog/catalog.h"
      45             : #include "commands/trigger.h"
      46             : #include "executor/execPartition.h"
      47             : #include "executor/executor.h"
      48             : #include "executor/nodeModifyTable.h"
      49             : #include "foreign/fdwapi.h"
      50             : #include "miscadmin.h"
      51             : #include "nodes/nodeFuncs.h"
      52             : #include "rewrite/rewriteHandler.h"
      53             : #include "storage/bufmgr.h"
      54             : #include "storage/lmgr.h"
      55             : #include "utils/builtins.h"
      56             : #include "utils/datum.h"
      57             : #include "utils/memutils.h"
      58             : #include "utils/rel.h"
      59             : 
      60             : 
      61             : static bool ExecOnConflictUpdate(ModifyTableState *mtstate,
      62             :                                  ResultRelInfo *resultRelInfo,
      63             :                                  ItemPointer conflictTid,
      64             :                                  TupleTableSlot *planSlot,
      65             :                                  TupleTableSlot *excludedSlot,
      66             :                                  EState *estate,
      67             :                                  bool canSetTag,
      68             :                                  TupleTableSlot **returning);
      69             : static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
      70             :                                                EState *estate,
      71             :                                                PartitionTupleRouting *proute,
      72             :                                                ResultRelInfo *targetRelInfo,
      73             :                                                TupleTableSlot *slot);
      74             : static ResultRelInfo *getTargetResultRelInfo(ModifyTableState *node);
      75             : static void ExecSetupChildParentMapForSubplan(ModifyTableState *mtstate);
      76             : static TupleConversionMap *tupconv_map_for_subplan(ModifyTableState *node,
      77             :                                                    int whichplan);
      78             : 
      79             : /*
      80             :  * Verify that the tuples to be produced by INSERT or UPDATE match the
      81             :  * target relation's rowtype
      82             :  *
      83             :  * We do this to guard against stale plans.  If plan invalidation is
      84             :  * functioning properly then we should never get a failure here, but better
      85             :  * safe than sorry.  Note that this is called after we have obtained lock
      86             :  * on the target rel, so the rowtype can't change underneath us.
      87             :  *
      88             :  * The plan output is represented by its targetlist, because that makes
      89             :  * handling the dropped-column case easier.
      90             :  */
      91             : static void
      92       80456 : ExecCheckPlanOutput(Relation resultRel, List *targetList)
      93             : {
      94       80456 :     TupleDesc   resultDesc = RelationGetDescr(resultRel);
      95       80456 :     int         attno = 0;
      96             :     ListCell   *lc;
      97             : 
      98      453458 :     foreach(lc, targetList)
      99             :     {
     100      373002 :         TargetEntry *tle = (TargetEntry *) lfirst(lc);
     101             :         Form_pg_attribute attr;
     102             : 
     103      373002 :         if (tle->resjunk)
     104       12406 :             continue;           /* ignore junk tlist items */
     105             : 
     106      360596 :         if (attno >= resultDesc->natts)
     107           0 :             ereport(ERROR,
     108             :                     (errcode(ERRCODE_DATATYPE_MISMATCH),
     109             :                      errmsg("table row type and query-specified row type do not match"),
     110             :                      errdetail("Query has too many columns.")));
     111      360596 :         attr = TupleDescAttr(resultDesc, attno);
     112      360596 :         attno++;
     113             : 
     114      360596 :         if (!attr->attisdropped)
     115             :         {
     116             :             /* Normal case: demand type match */
     117      360024 :             if (exprType((Node *) tle->expr) != attr->atttypid)
     118           0 :                 ereport(ERROR,
     119             :                         (errcode(ERRCODE_DATATYPE_MISMATCH),
     120             :                          errmsg("table row type and query-specified row type do not match"),
     121             :                          errdetail("Table has type %s at ordinal position %d, but query expects %s.",
     122             :                                    format_type_be(attr->atttypid),
     123             :                                    attno,
     124             :                                    format_type_be(exprType((Node *) tle->expr)))));
     125             :         }
     126             :         else
     127             :         {
     128             :             /*
     129             :              * For a dropped column, we can't check atttypid (it's likely 0).
     130             :              * In any case the planner has most likely inserted an INT4 null.
     131             :              * What we insist on is just *some* NULL constant.
     132             :              */
     133         572 :             if (!IsA(tle->expr, Const) ||
     134         572 :                 !((Const *) tle->expr)->constisnull)
     135           0 :                 ereport(ERROR,
     136             :                         (errcode(ERRCODE_DATATYPE_MISMATCH),
     137             :                          errmsg("table row type and query-specified row type do not match"),
     138             :                          errdetail("Query provides a value for a dropped column at ordinal position %d.",
     139             :                                    attno)));
     140             :         }
     141             :     }
     142       80456 :     if (attno != resultDesc->natts)
     143           0 :         ereport(ERROR,
     144             :                 (errcode(ERRCODE_DATATYPE_MISMATCH),
     145             :                  errmsg("table row type and query-specified row type do not match"),
     146             :                  errdetail("Query has too few columns.")));
     147       80456 : }
     148             : 
     149             : /*
     150             :  * ExecProcessReturning --- evaluate a RETURNING list
     151             :  *
     152             :  * resultRelInfo: current result rel
     153             :  * tupleSlot: slot holding tuple actually inserted/updated/deleted
     154             :  * planSlot: slot holding tuple returned by top subplan node
     155             :  *
     156             :  * Note: If tupleSlot is NULL, the FDW should have already provided econtext's
     157             :  * scan tuple.
     158             :  *
     159             :  * Returns a slot holding the result tuple
     160             :  */
     161             : static TupleTableSlot *
     162        4356 : ExecProcessReturning(ResultRelInfo *resultRelInfo,
     163             :                      TupleTableSlot *tupleSlot,
     164             :                      TupleTableSlot *planSlot)
     165             : {
     166        4356 :     ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
     167        4356 :     ExprContext *econtext = projectReturning->pi_exprContext;
     168             : 
     169             :     /* Make tuple and any needed join variables available to ExecProject */
     170        4356 :     if (tupleSlot)
     171        3664 :         econtext->ecxt_scantuple = tupleSlot;
     172        4356 :     econtext->ecxt_outertuple = planSlot;
     173             : 
     174             :     /*
     175             :      * RETURNING expressions might reference the tableoid column, so
     176             :      * reinitialize tts_tableOid before evaluating them.
     177             :      */
     178        8712 :     econtext->ecxt_scantuple->tts_tableOid =
     179        8712 :         RelationGetRelid(resultRelInfo->ri_RelationDesc);
     180             : 
     181             :     /* Compute the RETURNING expressions */
     182        4356 :     return ExecProject(projectReturning);
     183             : }
     184             : 
     185             : /*
     186             :  * ExecCheckTupleVisible -- verify tuple is visible
     187             :  *
     188             :  * It would not be consistent with guarantees of the higher isolation levels to
     189             :  * proceed with avoiding insertion (taking speculative insertion's alternative
     190             :  * path) on the basis of another tuple that is not visible to MVCC snapshot.
     191             :  * Check for the need to raise a serialization failure, and do so as necessary.
     192             :  */
     193             : static void
     194        4986 : ExecCheckTupleVisible(EState *estate,
     195             :                       Relation rel,
     196             :                       TupleTableSlot *slot)
     197             : {
     198        4986 :     if (!IsolationUsesXactSnapshot())
     199        4926 :         return;
     200             : 
     201          60 :     if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
     202             :     {
     203             :         Datum       xminDatum;
     204             :         TransactionId xmin;
     205             :         bool        isnull;
     206             : 
     207          36 :         xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
     208             :         Assert(!isnull);
     209          36 :         xmin = DatumGetTransactionId(xminDatum);
     210             : 
     211             :         /*
     212             :          * We should not raise a serialization failure if the conflict is
     213             :          * against a tuple inserted by our own transaction, even if it's not
     214             :          * visible to our snapshot.  (This would happen, for example, if
     215             :          * conflicting keys are proposed for insertion in a single command.)
     216             :          */
     217          36 :         if (!TransactionIdIsCurrentTransactionId(xmin))
     218          20 :             ereport(ERROR,
     219             :                     (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
     220             :                      errmsg("could not serialize access due to concurrent update")));
     221             :     }
     222             : }
     223             : 
     224             : /*
     225             :  * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
     226             :  */
     227             : static void
     228         122 : ExecCheckTIDVisible(EState *estate,
     229             :                     ResultRelInfo *relinfo,
     230             :                     ItemPointer tid,
     231             :                     TupleTableSlot *tempSlot)
     232             : {
     233         122 :     Relation    rel = relinfo->ri_RelationDesc;
     234             : 
     235             :     /* Redundantly check isolation level */
     236         122 :     if (!IsolationUsesXactSnapshot())
     237          62 :         return;
     238             : 
     239          60 :     if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
     240           0 :         elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
     241          60 :     ExecCheckTupleVisible(estate, rel, tempSlot);
     242          40 :     ExecClearTuple(tempSlot);
     243             : }
     244             : 
     245             : /*
     246             :  * Compute stored generated columns for a tuple
     247             :  */
     248             : void
     249         362 : ExecComputeStoredGenerated(EState *estate, TupleTableSlot *slot, CmdType cmdtype)
     250             : {
     251         362 :     ResultRelInfo *resultRelInfo = estate->es_result_relation_info;
     252         362 :     Relation    rel = resultRelInfo->ri_RelationDesc;
     253         362 :     TupleDesc   tupdesc = RelationGetDescr(rel);
     254         362 :     int         natts = tupdesc->natts;
     255             :     MemoryContext oldContext;
     256             :     Datum      *values;
     257             :     bool       *nulls;
     258             : 
     259             :     Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);
     260             : 
     261             :     /*
     262             :      * If first time through for this result relation, build expression
     263             :      * nodetrees for rel's stored generation expressions.  Keep them in the
     264             :      * per-query memory context so they'll survive throughout the query.
     265             :      */
     266         362 :     if (resultRelInfo->ri_GeneratedExprs == NULL)
     267             :     {
     268         254 :         oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
     269             : 
     270         254 :         resultRelInfo->ri_GeneratedExprs =
     271         254 :             (ExprState **) palloc(natts * sizeof(ExprState *));
     272         254 :         resultRelInfo->ri_NumGeneratedNeeded = 0;
     273             : 
     274         786 :         for (int i = 0; i < natts; i++)
     275             :         {
     276         532 :             if (TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED)
     277             :             {
     278             :                 Expr       *expr;
     279             : 
     280             :                 /*
     281             :                  * If it's an update and the current column was not marked as
     282             :                  * being updated, then we can skip the computation.  But if
     283             :                  * there is a BEFORE ROW UPDATE trigger, we cannot skip
     284             :                  * because the trigger might affect additional columns.
     285             :                  */
     286         254 :                 if (cmdtype == CMD_UPDATE &&
     287          40 :                     !(rel->trigdesc && rel->trigdesc->trig_update_before_row) &&
     288          22 :                     !bms_is_member(i + 1 - FirstLowInvalidHeapAttributeNumber,
     289          22 :                                    exec_rt_fetch(resultRelInfo->ri_RangeTableIndex, estate)->extraUpdatedCols))
     290             :                 {
     291           4 :                     resultRelInfo->ri_GeneratedExprs[i] = NULL;
     292           4 :                     continue;
     293             :                 }
     294             : 
     295         250 :                 expr = (Expr *) build_column_default(rel, i + 1);
     296         250 :                 if (expr == NULL)
     297           0 :                     elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
     298             :                          i + 1, RelationGetRelationName(rel));
     299             : 
     300         250 :                 resultRelInfo->ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
     301         250 :                 resultRelInfo->ri_NumGeneratedNeeded++;
     302             :             }
     303             :         }
     304             : 
     305         254 :         MemoryContextSwitchTo(oldContext);
     306             :     }
     307             : 
     308             :     /*
     309             :      * If no generated columns have been affected by this change, then skip
     310             :      * the rest.
     311             :      */
     312         362 :     if (resultRelInfo->ri_NumGeneratedNeeded == 0)
     313           4 :         return;
     314             : 
     315         358 :     oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
     316             : 
     317         358 :     values = palloc(sizeof(*values) * natts);
     318         358 :     nulls = palloc(sizeof(*nulls) * natts);
     319             : 
     320         358 :     slot_getallattrs(slot);
     321         358 :     memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);
     322             : 
     323        1102 :     for (int i = 0; i < natts; i++)
     324             :     {
     325         752 :         Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
     326             : 
     327         752 :         if (attr->attgenerated == ATTRIBUTE_GENERATED_STORED &&
     328         358 :             resultRelInfo->ri_GeneratedExprs[i])
     329         350 :         {
     330             :             ExprContext *econtext;
     331             :             Datum       val;
     332             :             bool        isnull;
     333             : 
     334         358 :             econtext = GetPerTupleExprContext(estate);
     335         358 :             econtext->ecxt_scantuple = slot;
     336             : 
     337         358 :             val = ExecEvalExpr(resultRelInfo->ri_GeneratedExprs[i], econtext, &isnull);
     338             : 
     339             :             /*
     340             :              * We must make a copy of val as we have no guarantees about where
     341             :              * memory for a pass-by-reference Datum is located.
     342             :              */
     343         350 :             if (!isnull)
     344         322 :                 val = datumCopy(val, attr->attbyval, attr->attlen);
     345             : 
     346         350 :             values[i] = val;
     347         350 :             nulls[i] = isnull;
     348             :         }
     349             :         else
     350             :         {
     351         394 :             if (!nulls[i])
     352         378 :                 values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
     353             :         }
     354             :     }
     355             : 
     356         350 :     ExecClearTuple(slot);
     357         350 :     memcpy(slot->tts_values, values, sizeof(*values) * natts);
     358         350 :     memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
     359         350 :     ExecStoreVirtualTuple(slot);
     360         350 :     ExecMaterializeSlot(slot);
     361             : 
     362         350 :     MemoryContextSwitchTo(oldContext);
     363             : }
     364             : 
     365             : /* ----------------------------------------------------------------
     366             :  *      ExecInsert
     367             :  *
     368             :  *      For INSERT, we have to insert the tuple into the target relation
     369             :  *      and insert appropriate tuples into the index relations.
     370             :  *
     371             :  *      Returns RETURNING result if any, otherwise NULL.
     372             :  * ----------------------------------------------------------------
     373             :  */
     374             : static TupleTableSlot *
     375    10826446 : ExecInsert(ModifyTableState *mtstate,
     376             :            TupleTableSlot *slot,
     377             :            TupleTableSlot *planSlot,
     378             :            EState *estate,
     379             :            bool canSetTag)
     380             : {
     381             :     ResultRelInfo *resultRelInfo;
     382             :     Relation    resultRelationDesc;
     383    10826446 :     List       *recheckIndexes = NIL;
     384    10826446 :     TupleTableSlot *result = NULL;
     385             :     TransitionCaptureState *ar_insert_trig_tcs;
     386    10826446 :     ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
     387    10826446 :     OnConflictAction onconflict = node->onConflictAction;
     388             : 
     389    10826446 :     ExecMaterializeSlot(slot);
     390             : 
     391             :     /*
     392             :      * get information on the (current) result relation
     393             :      */
     394    10826446 :     resultRelInfo = estate->es_result_relation_info;
     395    10826446 :     resultRelationDesc = resultRelInfo->ri_RelationDesc;
     396             : 
     397             :     /*
     398             :      * BEFORE ROW INSERT Triggers.
     399             :      *
     400             :      * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
     401             :      * INSERT ... ON CONFLICT statement.  We cannot check for constraint
     402             :      * violations before firing these triggers, because they can change the
     403             :      * values to insert.  Also, they can run arbitrary user-defined code with
     404             :      * side-effects that we can't cancel by just not inserting the tuple.
     405             :      */
     406    10826446 :     if (resultRelInfo->ri_TrigDesc &&
     407       49244 :         resultRelInfo->ri_TrigDesc->trig_insert_before_row)
     408             :     {
     409        1254 :         if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
     410          16 :             return NULL;        /* "do nothing" */
     411             :     }
     412             : 
     413             :     /* INSTEAD OF ROW INSERT Triggers */
     414    10826346 :     if (resultRelInfo->ri_TrigDesc &&
     415       49144 :         resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
     416             :     {
     417          94 :         if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
     418           4 :             return NULL;        /* "do nothing" */
     419             :     }
     420    10826252 :     else if (resultRelInfo->ri_FdwRoutine)
     421             :     {
     422             :         /*
     423             :          * Compute stored generated columns
     424             :          */
     425         394 :         if (resultRelationDesc->rd_att->constr &&
     426         362 :             resultRelationDesc->rd_att->constr->has_generated_stored)
     427           4 :             ExecComputeStoredGenerated(estate, slot, CMD_INSERT);
     428             : 
     429             :         /*
     430             :          * insert into foreign table: let the FDW do it
     431             :          */
     432         394 :         slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
     433             :                                                                resultRelInfo,
     434             :                                                                slot,
     435             :                                                                planSlot);
     436             : 
     437         388 :         if (slot == NULL)       /* "do nothing" */
     438           4 :             return NULL;
     439             : 
     440             :         /*
     441             :          * AFTER ROW Triggers or RETURNING expressions might reference the
     442             :          * tableoid column, so (re-)initialize tts_tableOid before evaluating
     443             :          * them.
     444             :          */
     445         384 :         slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
     446             :     }
     447             :     else
     448             :     {
     449             :         WCOKind     wco_kind;
     450             : 
     451             :         /*
     452             :          * Constraints might reference the tableoid column, so (re-)initialize
     453             :          * tts_tableOid before evaluating them.
     454             :          */
     455    10825858 :         slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
     456             : 
     457             :         /*
     458             :          * Compute stored generated columns
     459             :          */
     460    10825858 :         if (resultRelationDesc->rd_att->constr &&
     461     5259428 :             resultRelationDesc->rd_att->constr->has_generated_stored)
     462         284 :             ExecComputeStoredGenerated(estate, slot, CMD_INSERT);
     463             : 
     464             :         /*
     465             :          * Check any RLS WITH CHECK policies.
     466             :          *
     467             :          * Normally we should check INSERT policies. But if the insert is the
     468             :          * result of a partition key update that moved the tuple to a new
     469             :          * partition, we should instead check UPDATE policies, because we are
     470             :          * executing policies defined on the target table, and not those
     471             :          * defined on the child partitions.
     472             :          */
     473    21651700 :         wco_kind = (mtstate->operation == CMD_UPDATE) ?
     474    10825850 :             WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
     475             : 
     476             :         /*
     477             :          * ExecWithCheckOptions() will skip any WCOs which are not of the kind
     478             :          * we are looking for at this point.
     479             :          */
     480    10825850 :         if (resultRelInfo->ri_WithCheckOptions != NIL)
     481         344 :             ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
     482             : 
     483             :         /*
     484             :          * Check the constraints of the tuple.
     485             :          */
     486    10825738 :         if (resultRelationDesc->rd_att->constr)
     487     5259372 :             ExecConstraints(resultRelInfo, slot, estate);
     488             : 
     489             :         /*
     490             :          * Also check the tuple against the partition constraint, if there is
     491             :          * one; except that if we got here via tuple-routing, we don't need to
     492             :          * if there's no BR trigger defined on the partition.
     493             :          */
     494    10825370 :         if (resultRelInfo->ri_PartitionCheck &&
     495      505580 :             (resultRelInfo->ri_PartitionRoot == NULL ||
     496      504058 :              (resultRelInfo->ri_TrigDesc &&
     497         532 :               resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
     498        1622 :             ExecPartitionCheck(resultRelInfo, slot, estate, true);
     499             : 
     500    10825262 :         if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
     501        3884 :         {
     502             :             /* Perform a speculative insertion. */
     503             :             uint32      specToken;
     504             :             ItemPointerData conflictTid;
     505             :             bool        specConflict;
     506             :             List       *arbiterIndexes;
     507             : 
     508        8956 :             arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
     509             : 
     510             :             /*
     511             :              * Do a non-conclusive check for conflicts first.
     512             :              *
     513             :              * We're not holding any locks yet, so this doesn't guarantee that
     514             :              * the later insert won't conflict.  But it avoids leaving behind
     515             :              * a lot of canceled speculative insertions, if you run a lot of
     516             :              * INSERT ON CONFLICT statements that do conflict.
     517             :              *
     518             :              * We loop back here if we find a conflict below, either during
     519             :              * the pre-check, or when we re-check after inserting the tuple
     520             :              * speculatively.
     521             :              */
     522        8966 :     vlock:
     523        8966 :             specConflict = false;
     524        8966 :             if (!ExecCheckIndexConstraints(slot, estate, &conflictTid,
     525             :                                            arbiterIndexes))
     526             :             {
     527             :                 /* committed conflict tuple found */
     528        5064 :                 if (onconflict == ONCONFLICT_UPDATE)
     529             :                 {
     530             :                     /*
     531             :                      * In case of ON CONFLICT DO UPDATE, execute the UPDATE
     532             :                      * part.  Be prepared to retry if the UPDATE fails because
     533             :                      * of another concurrent UPDATE/DELETE to the conflict
     534             :                      * tuple.
     535             :                      */
     536        4942 :                     TupleTableSlot *returning = NULL;
     537             : 
     538        4942 :                     if (ExecOnConflictUpdate(mtstate, resultRelInfo,
     539             :                                              &conflictTid, planSlot, slot,
     540             :                                              estate, canSetTag, &returning))
     541             :                     {
     542        4890 :                         InstrCountTuples2(&mtstate->ps, 1);
     543        4890 :                         return returning;
     544             :                     }
     545             :                     else
     546           0 :                         goto vlock;
     547             :                 }
     548             :                 else
     549             :                 {
     550             :                     /*
     551             :                      * In case of ON CONFLICT DO NOTHING, do nothing. However,
     552             :                      * verify that the tuple is visible to the executor's MVCC
     553             :                      * snapshot at higher isolation levels.
     554             :                      *
     555             :                      * Using ExecGetReturningSlot() to store the tuple for the
     556             :                      * recheck isn't that pretty, but we can't trivially use
     557             :                      * the input slot, because it might not be of a compatible
     558             :                      * type. As there's no conflicting usage of
     559             :                      * ExecGetReturningSlot() in the DO NOTHING case...
     560             :                      */
     561             :                     Assert(onconflict == ONCONFLICT_NOTHING);
     562         122 :                     ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
     563             :                                         ExecGetReturningSlot(estate, resultRelInfo));
     564         102 :                     InstrCountTuples2(&mtstate->ps, 1);
     565         102 :                     return NULL;
     566             :                 }
     567             :             }
     568             : 
     569             :             /*
     570             :              * Before we start insertion proper, acquire our "speculative
     571             :              * insertion lock".  Others can use that to wait for us to decide
     572             :              * if we're going to go ahead with the insertion, instead of
     573             :              * waiting for the whole transaction to complete.
     574             :              */
     575        3898 :             specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
     576             : 
     577             :             /* insert the tuple, with the speculative token */
     578        3898 :             table_tuple_insert_speculative(resultRelationDesc, slot,
     579             :                                            estate->es_output_cid,
     580             :                                            0,
     581             :                                            NULL,
     582             :                                            specToken);
     583             : 
     584             :             /* insert index entries for tuple */
     585        3898 :             recheckIndexes = ExecInsertIndexTuples(slot, estate, true,
     586             :                                                    &specConflict,
     587             :                                                    arbiterIndexes);
     588             : 
     589             :             /* adjust the tuple's state accordingly */
     590        3894 :             table_tuple_complete_speculative(resultRelationDesc, slot,
     591        3894 :                                              specToken, !specConflict);
     592             : 
     593             :             /*
     594             :              * Wake up anyone waiting for our decision.  They will re-check
     595             :              * the tuple, see that it's no longer speculative, and wait on our
     596             :              * XID as if this was a regularly inserted tuple all along.  Or if
     597             :              * we killed the tuple, they will see it's dead, and proceed as if
     598             :              * the tuple never existed.
     599             :              */
     600        3894 :             SpeculativeInsertionLockRelease(GetCurrentTransactionId());
     601             : 
     602             :             /*
     603             :              * If there was a conflict, start from the beginning.  We'll do
     604             :              * the pre-check again, which will now find the conflicting tuple
     605             :              * (unless it aborts before we get there).
     606             :              */
     607        3894 :             if (specConflict)
     608             :             {
     609          10 :                 list_free(recheckIndexes);
     610          10 :                 goto vlock;
     611             :             }
     612             : 
     613             :             /* Since there was no insertion conflict, we're done */
     614             :         }
     615             :         else
     616             :         {
     617             :             /* insert the tuple normally */
     618    10816306 :             table_tuple_insert(resultRelationDesc, slot,
     619             :                                estate->es_output_cid,
     620             :                                0, NULL);
     621             : 
     622             :             /* insert index entries for tuple */
     623    10816284 :             if (resultRelInfo->ri_NumIndices > 0)
     624     4184020 :                 recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL,
     625             :                                                        NIL);
     626             :         }
     627             :     }
     628             : 
     629    10820268 :     if (canSetTag)
     630             :     {
     631    10819552 :         (estate->es_processed)++;
     632    10819552 :         setLastTid(&slot->tts_tid);
     633             :     }
     634             : 
     635             :     /*
     636             :      * If this insert is the result of a partition key update that moved the
     637             :      * tuple to a new partition, put this row into the transition NEW TABLE,
     638             :      * if there is one. We need to do this separately for DELETE and INSERT
     639             :      * because they happen on different tables.
     640             :      */
     641    10820268 :     ar_insert_trig_tcs = mtstate->mt_transition_capture;
     642    10820268 :     if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
     643          28 :         && mtstate->mt_transition_capture->tcs_update_new_table)
     644             :     {
     645          28 :         ExecARUpdateTriggers(estate, resultRelInfo, NULL,
     646             :                              NULL,
     647             :                              slot,
     648             :                              NULL,
     649          28 :                              mtstate->mt_transition_capture);
     650             : 
     651             :         /*
     652             :          * We've already captured the NEW TABLE row, so make sure any AR
     653             :          * INSERT trigger fired below doesn't capture it again.
     654             :          */
     655          28 :         ar_insert_trig_tcs = NULL;
     656             :     }
     657             : 
     658             :     /* AFTER ROW INSERT Triggers */
     659    10820268 :     ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
     660             :                          ar_insert_trig_tcs);
     661             : 
     662    10820268 :     list_free(recheckIndexes);
     663             : 
     664             :     /*
     665             :      * Check any WITH CHECK OPTION constraints from parent views.  We are
     666             :      * required to do this after testing all constraints and uniqueness
     667             :      * violations per the SQL spec, so we do it after actually inserting the
     668             :      * record into the heap and all indexes.
     669             :      *
     670             :      * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
     671             :      * tuple will never be seen, if it violates the WITH CHECK OPTION.
     672             :      *
     673             :      * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
     674             :      * are looking for at this point.
     675             :      */
     676    10820268 :     if (resultRelInfo->ri_WithCheckOptions != NIL)
     677         212 :         ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
     678             : 
     679             :     /* Process RETURNING if present */
     680    10820180 :     if (resultRelInfo->ri_projectReturning)
     681        1398 :         result = ExecProcessReturning(resultRelInfo, slot, planSlot);
     682             : 
     683    10820180 :     return result;
     684             : }
     685             : 
     686             : /* ----------------------------------------------------------------
     687             :  *      ExecDelete
     688             :  *
     689             :  *      DELETE is like UPDATE, except that we delete the tuple and no
     690             :  *      index modifications are needed.
     691             :  *
     692             :  *      When deleting from a table, tupleid identifies the tuple to
     693             :  *      delete and oldtuple is NULL.  When deleting from a view,
     694             :  *      oldtuple is passed to the INSTEAD OF triggers and identifies
     695             :  *      what to delete, and tupleid is invalid.  When deleting from a
     696             :  *      foreign table, tupleid is invalid; the FDW has to figure out
     697             :  *      which row to delete using data from the planSlot.  oldtuple is
     698             :  *      passed to foreign table triggers; it is NULL when the foreign
     699             :  *      table has no relevant triggers.  We use tupleDeleted to indicate
     700             :  *      whether the tuple is actually deleted, callers can use it to
     701             :  *      decide whether to continue the operation.  When this DELETE is a
     702             :  *      part of an UPDATE of partition-key, then the slot returned by
     703             :  *      EvalPlanQual() is passed back using output parameter epqslot.
     704             :  *
     705             :  *      Returns RETURNING result if any, otherwise NULL.
     706             :  * ----------------------------------------------------------------
     707             :  */
     708             : static TupleTableSlot *
     709      876882 : ExecDelete(ModifyTableState *mtstate,
     710             :            ItemPointer tupleid,
     711             :            HeapTuple oldtuple,
     712             :            TupleTableSlot *planSlot,
     713             :            EPQState *epqstate,
     714             :            EState *estate,
     715             :            bool processReturning,
     716             :            bool canSetTag,
     717             :            bool changingPart,
     718             :            bool *tupleDeleted,
     719             :            TupleTableSlot **epqreturnslot)
     720             : {
     721             :     ResultRelInfo *resultRelInfo;
     722             :     Relation    resultRelationDesc;
     723             :     TM_Result   result;
     724             :     TM_FailureData tmfd;
     725      876882 :     TupleTableSlot *slot = NULL;
     726             :     TransitionCaptureState *ar_delete_trig_tcs;
     727             : 
     728      876882 :     if (tupleDeleted)
     729         376 :         *tupleDeleted = false;
     730             : 
     731             :     /*
     732             :      * get information on the (current) result relation
     733             :      */
     734      876882 :     resultRelInfo = estate->es_result_relation_info;
     735      876882 :     resultRelationDesc = resultRelInfo->ri_RelationDesc;
     736             : 
     737             :     /* BEFORE ROW DELETE Triggers */
     738      876882 :     if (resultRelInfo->ri_TrigDesc &&
     739        4286 :         resultRelInfo->ri_TrigDesc->trig_delete_before_row)
     740             :     {
     741             :         bool        dodelete;
     742             : 
     743         216 :         dodelete = ExecBRDeleteTriggers(estate, epqstate, resultRelInfo,
     744             :                                         tupleid, oldtuple, epqreturnslot);
     745             : 
     746         192 :         if (!dodelete)          /* "do nothing" */
     747          22 :             return NULL;
     748             :     }
     749             : 
     750             :     /* INSTEAD OF ROW DELETE Triggers */
     751      876836 :     if (resultRelInfo->ri_TrigDesc &&
     752        4240 :         resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
     753          34 :     {
     754             :         bool        dodelete;
     755             : 
     756             :         Assert(oldtuple != NULL);
     757          38 :         dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
     758             : 
     759          38 :         if (!dodelete)          /* "do nothing" */
     760           4 :             return NULL;
     761             :     }
     762      876798 :     else if (resultRelInfo->ri_FdwRoutine)
     763             :     {
     764             :         /*
     765             :          * delete from foreign table: let the FDW do it
     766             :          *
     767             :          * We offer the returning slot as a place to store RETURNING data,
     768             :          * although the FDW can return some other slot if it wants.
     769             :          */
     770          18 :         slot = ExecGetReturningSlot(estate, resultRelInfo);
     771          18 :         slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
     772             :                                                                resultRelInfo,
     773             :                                                                slot,
     774             :                                                                planSlot);
     775             : 
     776          18 :         if (slot == NULL)       /* "do nothing" */
     777           0 :             return NULL;
     778             : 
     779             :         /*
     780             :          * RETURNING expressions might reference the tableoid column, so
     781             :          * (re)initialize tts_tableOid before evaluating them.
     782             :          */
     783          18 :         if (TTS_EMPTY(slot))
     784           0 :             ExecStoreAllNullTuple(slot);
     785             : 
     786          18 :         slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
     787             :     }
     788             :     else
     789             :     {
     790             :         /*
     791             :          * delete the tuple
     792             :          *
     793             :          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
     794             :          * that the row to be deleted is visible to that snapshot, and throw a
     795             :          * can't-serialize error if not. This is a special-case behavior
     796             :          * needed for referential integrity updates in transaction-snapshot
     797             :          * mode transactions.
     798             :          */
     799      876780 : ldelete:;
     800      876784 :         result = table_tuple_delete(resultRelationDesc, tupleid,
     801             :                                     estate->es_output_cid,
     802             :                                     estate->es_snapshot,
     803             :                                     estate->es_crosscheck_snapshot,
     804             :                                     true /* wait for commit */ ,
     805             :                                     &tmfd,
     806             :                                     changingPart);
     807             : 
     808      876748 :         switch (result)
     809             :         {
     810          20 :             case TM_SelfModified:
     811             : 
     812             :                 /*
     813             :                  * The target tuple was already updated or deleted by the
     814             :                  * current command, or by a later command in the current
     815             :                  * transaction.  The former case is possible in a join DELETE
     816             :                  * where multiple tuples join to the same target tuple. This
     817             :                  * is somewhat questionable, but Postgres has always allowed
     818             :                  * it: we just ignore additional deletion attempts.
     819             :                  *
     820             :                  * The latter case arises if the tuple is modified by a
     821             :                  * command in a BEFORE trigger, or perhaps by a command in a
     822             :                  * volatile function used in the query.  In such situations we
     823             :                  * should not ignore the deletion, but it is equally unsafe to
     824             :                  * proceed.  We don't want to discard the original DELETE
     825             :                  * while keeping the triggered actions based on its deletion;
     826             :                  * and it would be no better to allow the original DELETE
     827             :                  * while discarding updates that it triggered.  The row update
     828             :                  * carries some information that might be important according
     829             :                  * to business rules; so throwing an error is the only safe
     830             :                  * course.
     831             :                  *
     832             :                  * If a trigger actually intends this type of interaction, it
     833             :                  * can re-execute the DELETE and then return NULL to cancel
     834             :                  * the outer delete.
     835             :                  */
     836          20 :                 if (tmfd.cmax != estate->es_output_cid)
     837           4 :                     ereport(ERROR,
     838             :                             (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
     839             :                              errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
     840             :                              errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
     841             : 
     842             :                 /* Else, already deleted by self; nothing to do */
     843          16 :                 return NULL;
     844             : 
     845      876704 :             case TM_Ok:
     846      876704 :                 break;
     847             : 
     848          20 :             case TM_Updated:
     849             :                 {
     850             :                     TupleTableSlot *inputslot;
     851             :                     TupleTableSlot *epqslot;
     852             : 
     853          20 :                     if (IsolationUsesXactSnapshot())
     854           0 :                         ereport(ERROR,
     855             :                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
     856             :                                  errmsg("could not serialize access due to concurrent update")));
     857             : 
     858             :                     /*
     859             :                      * Already know that we're going to need to do EPQ, so
     860             :                      * fetch tuple directly into the right slot.
     861             :                      */
     862          20 :                     EvalPlanQualBegin(epqstate);
     863          20 :                     inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
     864             :                                                  resultRelInfo->ri_RangeTableIndex);
     865             : 
     866          20 :                     result = table_tuple_lock(resultRelationDesc, tupleid,
     867             :                                               estate->es_snapshot,
     868             :                                               inputslot, estate->es_output_cid,
     869             :                                               LockTupleExclusive, LockWaitBlock,
     870             :                                               TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
     871             :                                               &tmfd);
     872             : 
     873          18 :                     switch (result)
     874             :                     {
     875          12 :                         case TM_Ok:
     876             :                             Assert(tmfd.traversed);
     877          12 :                             epqslot = EvalPlanQual(epqstate,
     878             :                                                    resultRelationDesc,
     879             :                                                    resultRelInfo->ri_RangeTableIndex,
     880             :                                                    inputslot);
     881          12 :                             if (TupIsNull(epqslot))
     882             :                                 /* Tuple not passing quals anymore, exiting... */
     883           4 :                                 return NULL;
     884             : 
     885             :                             /*
     886             :                              * If requested, skip delete and pass back the
     887             :                              * updated row.
     888             :                              */
     889           8 :                             if (epqreturnslot)
     890             :                             {
     891           4 :                                 *epqreturnslot = epqslot;
     892           4 :                                 return NULL;
     893             :                             }
     894             :                             else
     895           4 :                                 goto ldelete;
     896             : 
     897           4 :                         case TM_SelfModified:
     898             : 
     899             :                             /*
     900             :                              * This can be reached when following an update
     901             :                              * chain from a tuple updated by another session,
     902             :                              * reaching a tuple that was already updated in
     903             :                              * this transaction. If previously updated by this
     904             :                              * command, ignore the delete, otherwise error
     905             :                              * out.
     906             :                              *
     907             :                              * See also TM_SelfModified response to
     908             :                              * table_tuple_delete() above.
     909             :                              */
     910           4 :                             if (tmfd.cmax != estate->es_output_cid)
     911           2 :                                 ereport(ERROR,
     912             :                                         (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
     913             :                                          errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
     914             :                                          errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
     915           2 :                             return NULL;
     916             : 
     917           2 :                         case TM_Deleted:
     918             :                             /* tuple already deleted; nothing to do */
     919           2 :                             return NULL;
     920             : 
     921           0 :                         default:
     922             : 
     923             :                             /*
     924             :                              * TM_Invisible should be impossible because we're
     925             :                              * waiting for updated row versions, and would
     926             :                              * already have errored out if the first version
     927             :                              * is invisible.
     928             :                              *
     929             :                              * TM_Updated should be impossible, because we're
     930             :                              * locking the latest version via
     931             :                              * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
     932             :                              */
     933           0 :                             elog(ERROR, "unexpected table_tuple_lock status: %u",
     934             :                                  result);
     935             :                             return NULL;
     936             :                     }
     937             : 
     938             :                     Assert(false);
     939             :                     break;
     940             :                 }
     941             : 
     942           4 :             case TM_Deleted:
     943           4 :                 if (IsolationUsesXactSnapshot())
     944           0 :                     ereport(ERROR,
     945             :                             (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
     946             :                              errmsg("could not serialize access due to concurrent delete")));
     947             :                 /* tuple already deleted; nothing to do */
     948           4 :                 return NULL;
     949             : 
     950           0 :             default:
     951           0 :                 elog(ERROR, "unrecognized table_tuple_delete status: %u",
     952             :                      result);
     953             :                 return NULL;
     954             :         }
     955             : 
     956             :         /*
     957             :          * Note: Normally one would think that we have to delete index tuples
     958             :          * associated with the heap tuple now...
     959             :          *
     960             :          * ... but in POSTGRES, we have no need to do this because VACUUM will
     961             :          * take care of it later.  We can't delete index tuples immediately
     962             :          * anyway, since the tuple is still visible to other transactions.
     963             :          */
     964             :     }
     965             : 
     966      876756 :     if (canSetTag)
     967      876274 :         (estate->es_processed)++;
     968             : 
     969             :     /* Tell caller that the delete actually happened. */
     970      876756 :     if (tupleDeleted)
     971         344 :         *tupleDeleted = true;
     972             : 
     973             :     /*
     974             :      * If this delete is the result of a partition key update that moved the
     975             :      * tuple to a new partition, put this row into the transition OLD TABLE,
     976             :      * if there is one. We need to do this separately for DELETE and INSERT
     977             :      * because they happen on different tables.
     978             :      */
     979      876756 :     ar_delete_trig_tcs = mtstate->mt_transition_capture;
     980      876756 :     if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
     981          28 :         && mtstate->mt_transition_capture->tcs_update_old_table)
     982             :     {
     983          28 :         ExecARUpdateTriggers(estate, resultRelInfo,
     984             :                              tupleid,
     985             :                              oldtuple,
     986             :                              NULL,
     987             :                              NULL,
     988          28 :                              mtstate->mt_transition_capture);
     989             : 
     990             :         /*
     991             :          * We've already captured the NEW TABLE row, so make sure any AR
     992             :          * DELETE trigger fired below doesn't capture it again.
     993             :          */
     994          28 :         ar_delete_trig_tcs = NULL;
     995             :     }
     996             : 
     997             :     /* AFTER ROW DELETE Triggers */
     998      876756 :     ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
     999             :                          ar_delete_trig_tcs);
    1000             : 
    1001             :     /* Process RETURNING if present and if requested */
    1002      876756 :     if (processReturning && resultRelInfo->ri_projectReturning)
    1003             :     {
    1004             :         /*
    1005             :          * We have to put the target tuple into a slot, which means first we
    1006             :          * gotta fetch it.  We can use the trigger tuple slot.
    1007             :          */
    1008             :         TupleTableSlot *rslot;
    1009             : 
    1010         634 :         if (resultRelInfo->ri_FdwRoutine)
    1011             :         {
    1012             :             /* FDW must have provided a slot containing the deleted row */
    1013             :             Assert(!TupIsNull(slot));
    1014             :         }
    1015             :         else
    1016             :         {
    1017         632 :             slot = ExecGetReturningSlot(estate, resultRelInfo);
    1018         632 :             if (oldtuple != NULL)
    1019             :             {
    1020          16 :                 ExecForceStoreHeapTuple(oldtuple, slot, false);
    1021             :             }
    1022             :             else
    1023             :             {
    1024         616 :                 if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
    1025             :                                                    SnapshotAny, slot))
    1026           0 :                     elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
    1027             :             }
    1028             :         }
    1029             : 
    1030         634 :         rslot = ExecProcessReturning(resultRelInfo, slot, planSlot);
    1031             : 
    1032             :         /*
    1033             :          * Before releasing the target tuple again, make sure rslot has a
    1034             :          * local copy of any pass-by-reference values.
    1035             :          */
    1036         634 :         ExecMaterializeSlot(rslot);
    1037             : 
    1038         634 :         ExecClearTuple(slot);
    1039             : 
    1040         634 :         return rslot;
    1041             :     }
    1042             : 
    1043      876122 :     return NULL;
    1044             : }
    1045             : 
    1046             : /* ----------------------------------------------------------------
    1047             :  *      ExecUpdate
    1048             :  *
    1049             :  *      note: we can't run UPDATE queries with transactions
    1050             :  *      off because UPDATEs are actually INSERTs and our
    1051             :  *      scan will mistakenly loop forever, updating the tuple
    1052             :  *      it just inserted..  This should be fixed but until it
    1053             :  *      is, we don't want to get stuck in an infinite loop
    1054             :  *      which corrupts your database..
    1055             :  *
    1056             :  *      When updating a table, tupleid identifies the tuple to
    1057             :  *      update and oldtuple is NULL.  When updating a view, oldtuple
    1058             :  *      is passed to the INSTEAD OF triggers and identifies what to
    1059             :  *      update, and tupleid is invalid.  When updating a foreign table,
    1060             :  *      tupleid is invalid; the FDW has to figure out which row to
    1061             :  *      update using data from the planSlot.  oldtuple is passed to
    1062             :  *      foreign table triggers; it is NULL when the foreign table has
    1063             :  *      no relevant triggers.
    1064             :  *
    1065             :  *      Returns RETURNING result if any, otherwise NULL.
    1066             :  * ----------------------------------------------------------------
    1067             :  */
    1068             : static TupleTableSlot *
    1069      132658 : ExecUpdate(ModifyTableState *mtstate,
    1070             :            ItemPointer tupleid,
    1071             :            HeapTuple oldtuple,
    1072             :            TupleTableSlot *slot,
    1073             :            TupleTableSlot *planSlot,
    1074             :            EPQState *epqstate,
    1075             :            EState *estate,
    1076             :            bool canSetTag)
    1077             : {
    1078             :     ResultRelInfo *resultRelInfo;
    1079             :     Relation    resultRelationDesc;
    1080             :     TM_Result   result;
    1081             :     TM_FailureData tmfd;
    1082      132658 :     List       *recheckIndexes = NIL;
    1083      132658 :     TupleConversionMap *saved_tcs_map = NULL;
    1084             : 
    1085             :     /*
    1086             :      * abort the operation if not running transactions
    1087             :      */
    1088      132658 :     if (IsBootstrapProcessingMode())
    1089           0 :         elog(ERROR, "cannot UPDATE during bootstrap");
    1090             : 
    1091      132658 :     ExecMaterializeSlot(slot);
    1092             : 
    1093             :     /*
    1094             :      * get information on the (current) result relation
    1095             :      */
    1096      132658 :     resultRelInfo = estate->es_result_relation_info;
    1097      132658 :     resultRelationDesc = resultRelInfo->ri_RelationDesc;
    1098             : 
    1099             :     /* BEFORE ROW UPDATE Triggers */
    1100      132658 :     if (resultRelInfo->ri_TrigDesc &&
    1101        3932 :         resultRelInfo->ri_TrigDesc->trig_update_before_row)
    1102             :     {
    1103        1936 :         if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
    1104             :                                   tupleid, oldtuple, slot))
    1105          96 :             return NULL;        /* "do nothing" */
    1106             :     }
    1107             : 
    1108             :     /* INSTEAD OF ROW UPDATE Triggers */
    1109      132522 :     if (resultRelInfo->ri_TrigDesc &&
    1110        3796 :         resultRelInfo->ri_TrigDesc->trig_update_instead_row)
    1111             :     {
    1112          78 :         if (!ExecIRUpdateTriggers(estate, resultRelInfo,
    1113             :                                   oldtuple, slot))
    1114          12 :             return NULL;        /* "do nothing" */
    1115             :     }
    1116      132444 :     else if (resultRelInfo->ri_FdwRoutine)
    1117             :     {
    1118             :         /*
    1119             :          * Compute stored generated columns
    1120             :          */
    1121          98 :         if (resultRelationDesc->rd_att->constr &&
    1122          72 :             resultRelationDesc->rd_att->constr->has_generated_stored)
    1123           2 :             ExecComputeStoredGenerated(estate, slot, CMD_UPDATE);
    1124             : 
    1125             :         /*
    1126             :          * update in foreign table: let the FDW do it
    1127             :          */
    1128          98 :         slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
    1129             :                                                                resultRelInfo,
    1130             :                                                                slot,
    1131             :                                                                planSlot);
    1132             : 
    1133          98 :         if (slot == NULL)       /* "do nothing" */
    1134           2 :             return NULL;
    1135             : 
    1136             :         /*
    1137             :          * AFTER ROW Triggers or RETURNING expressions might reference the
    1138             :          * tableoid column, so (re-)initialize tts_tableOid before evaluating
    1139             :          * them.
    1140             :          */
    1141          96 :         slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
    1142             :     }
    1143             :     else
    1144             :     {
    1145             :         LockTupleMode lockmode;
    1146             :         bool        partition_constraint_failed;
    1147             :         bool        update_indexes;
    1148             : 
    1149             :         /*
    1150             :          * Constraints might reference the tableoid column, so (re-)initialize
    1151             :          * tts_tableOid before evaluating them.
    1152             :          */
    1153      132346 :         slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
    1154             : 
    1155             :         /*
    1156             :          * Compute stored generated columns
    1157             :          */
    1158      132346 :         if (resultRelationDesc->rd_att->constr &&
    1159       67344 :             resultRelationDesc->rd_att->constr->has_generated_stored)
    1160          44 :             ExecComputeStoredGenerated(estate, slot, CMD_UPDATE);
    1161             : 
    1162             :         /*
    1163             :          * Check any RLS UPDATE WITH CHECK policies
    1164             :          *
    1165             :          * If we generate a new candidate tuple after EvalPlanQual testing, we
    1166             :          * must loop back here and recheck any RLS policies and constraints.
    1167             :          * (We don't need to redo triggers, however.  If there are any BEFORE
    1168             :          * triggers then trigger.c will have done table_tuple_lock to lock the
    1169             :          * correct tuple, so there's no need to do them again.)
    1170             :          */
    1171      132346 : lreplace:;
    1172             : 
    1173             :         /* ensure slot is independent, consider e.g. EPQ */
    1174      132444 :         ExecMaterializeSlot(slot);
    1175             : 
    1176             :         /*
    1177             :          * If partition constraint fails, this row might get moved to another
    1178             :          * partition, in which case we should check the RLS CHECK policy just
    1179             :          * before inserting into the new partition, rather than doing it here.
    1180             :          * This is because a trigger on that partition might again change the
    1181             :          * row.  So skip the WCO checks if the partition constraint fails.
    1182             :          */
    1183      132444 :         partition_constraint_failed =
    1184      133578 :             resultRelInfo->ri_PartitionCheck &&
    1185        1134 :             !ExecPartitionCheck(resultRelInfo, slot, estate, false);
    1186             : 
    1187      132444 :         if (!partition_constraint_failed &&
    1188      132028 :             resultRelInfo->ri_WithCheckOptions != NIL)
    1189             :         {
    1190             :             /*
    1191             :              * ExecWithCheckOptions() will skip any WCOs which are not of the
    1192             :              * kind we are looking for at this point.
    1193             :              */
    1194         284 :             ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
    1195             :                                  resultRelInfo, slot, estate);
    1196             :         }
    1197             : 
    1198             :         /*
    1199             :          * If a partition check failed, try to move the row into the right
    1200             :          * partition.
    1201             :          */
    1202      132416 :         if (partition_constraint_failed)
    1203             :         {
    1204             :             bool        tuple_deleted;
    1205             :             TupleTableSlot *ret_slot;
    1206         416 :             TupleTableSlot *epqslot = NULL;
    1207         416 :             PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
    1208             :             int         map_index;
    1209             :             TupleConversionMap *tupconv_map;
    1210             : 
    1211             :             /*
    1212             :              * Disallow an INSERT ON CONFLICT DO UPDATE that causes the
    1213             :              * original row to migrate to a different partition.  Maybe this
    1214             :              * can be implemented some day, but it seems a fringe feature with
    1215             :              * little redeeming value.
    1216             :              */
    1217         416 :             if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
    1218           0 :                 ereport(ERROR,
    1219             :                         (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
    1220             :                          errmsg("invalid ON UPDATE specification"),
    1221             :                          errdetail("The result tuple would appear in a different partition than the original tuple.")));
    1222             : 
    1223             :             /*
    1224             :              * When an UPDATE is run on a leaf partition, we will not have
    1225             :              * partition tuple routing set up. In that case, fail with
    1226             :              * partition constraint violation error.
    1227             :              */
    1228         416 :             if (proute == NULL)
    1229          40 :                 ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
    1230             : 
    1231             :             /*
    1232             :              * Row movement, part 1.  Delete the tuple, but skip RETURNING
    1233             :              * processing. We want to return rows from INSERT.
    1234             :              */
    1235         376 :             ExecDelete(mtstate, tupleid, oldtuple, planSlot, epqstate,
    1236             :                        estate, false, false /* canSetTag */ ,
    1237             :                        true /* changingPart */ , &tuple_deleted, &epqslot);
    1238             : 
    1239             :             /*
    1240             :              * For some reason if DELETE didn't happen (e.g. trigger prevented
    1241             :              * it, or it was already deleted by self, or it was concurrently
    1242             :              * deleted by another transaction), then we should skip the insert
    1243             :              * as well; otherwise, an UPDATE could cause an increase in the
    1244             :              * total number of rows across all partitions, which is clearly
    1245             :              * wrong.
    1246             :              *
    1247             :              * For a normal UPDATE, the case where the tuple has been the
    1248             :              * subject of a concurrent UPDATE or DELETE would be handled by
    1249             :              * the EvalPlanQual machinery, but for an UPDATE that we've
    1250             :              * translated into a DELETE from this partition and an INSERT into
    1251             :              * some other partition, that's not available, because CTID chains
    1252             :              * can't span relation boundaries.  We mimic the semantics to a
    1253             :              * limited extent by skipping the INSERT if the DELETE fails to
    1254             :              * find a tuple. This ensures that two concurrent attempts to
    1255             :              * UPDATE the same tuple at the same time can't turn one tuple
    1256             :              * into two, and that an UPDATE of a just-deleted tuple can't
    1257             :              * resurrect it.
    1258             :              */
    1259         376 :             if (!tuple_deleted)
    1260             :             {
    1261             :                 /*
    1262             :                  * epqslot will be typically NULL.  But when ExecDelete()
    1263             :                  * finds that another transaction has concurrently updated the
    1264             :                  * same row, it re-fetches the row, skips the delete, and
    1265             :                  * epqslot is set to the re-fetched tuple slot. In that case,
    1266             :                  * we need to do all the checks again.
    1267             :                  */
    1268          32 :                 if (TupIsNull(epqslot))
    1269         324 :                     return NULL;
    1270             :                 else
    1271             :                 {
    1272           6 :                     slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot);
    1273           6 :                     goto lreplace;
    1274             :                 }
    1275             :             }
    1276             : 
    1277             :             /*
    1278             :              * Updates set the transition capture map only when a new subplan
    1279             :              * is chosen.  But for inserts, it is set for each row. So after
    1280             :              * INSERT, we need to revert back to the map created for UPDATE;
    1281             :              * otherwise the next UPDATE will incorrectly use the one created
    1282             :              * for INSERT.  So first save the one created for UPDATE.
    1283             :              */
    1284         344 :             if (mtstate->mt_transition_capture)
    1285          28 :                 saved_tcs_map = mtstate->mt_transition_capture->tcs_map;
    1286             : 
    1287             :             /*
    1288             :              * resultRelInfo is one of the per-subplan resultRelInfos.  So we
    1289             :              * should convert the tuple into root's tuple descriptor, since
    1290             :              * ExecInsert() starts the search from root.  The tuple conversion
    1291             :              * map list is in the order of mtstate->resultRelInfo[], so to
    1292             :              * retrieve the one for this resultRel, we need to know the
    1293             :              * position of the resultRel in mtstate->resultRelInfo[].
    1294             :              */
    1295         344 :             map_index = resultRelInfo - mtstate->resultRelInfo;
    1296             :             Assert(map_index >= 0 && map_index < mtstate->mt_nplans);
    1297         344 :             tupconv_map = tupconv_map_for_subplan(mtstate, map_index);
    1298         344 :             if (tupconv_map != NULL)
    1299         138 :                 slot = execute_attr_map_slot(tupconv_map->attrMap,
    1300             :                                              slot,
    1301             :                                              mtstate->mt_root_tuple_slot);
    1302             : 
    1303             :             /*
    1304             :              * Prepare for tuple routing, making it look like we're inserting
    1305             :              * into the root.
    1306             :              */
    1307             :             Assert(mtstate->rootResultRelInfo != NULL);
    1308         344 :             slot = ExecPrepareTupleRouting(mtstate, estate, proute,
    1309             :                                            mtstate->rootResultRelInfo, slot);
    1310             : 
    1311         330 :             ret_slot = ExecInsert(mtstate, slot, planSlot,
    1312             :                                   estate, canSetTag);
    1313             : 
    1314             :             /* Revert ExecPrepareTupleRouting's node change. */
    1315         298 :             estate->es_result_relation_info = resultRelInfo;
    1316         298 :             if (mtstate->mt_transition_capture)
    1317             :             {
    1318          28 :                 mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
    1319          28 :                 mtstate->mt_transition_capture->tcs_map = saved_tcs_map;
    1320             :             }
    1321             : 
    1322         298 :             return ret_slot;
    1323             :         }
    1324             : 
    1325             :         /*
    1326             :          * Check the constraints of the tuple.  We've already checked the
    1327             :          * partition constraint above; however, we must still ensure the tuple
    1328             :          * passes all other constraints, so we will call ExecConstraints() and
    1329             :          * have it validate all remaining checks.
    1330             :          */
    1331      132000 :         if (resultRelationDesc->rd_att->constr)
    1332       67256 :             ExecConstraints(resultRelInfo, slot, estate);
    1333             : 
    1334             :         /*
    1335             :          * replace the heap tuple
    1336             :          *
    1337             :          * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
    1338             :          * that the row to be updated is visible to that snapshot, and throw a
    1339             :          * can't-serialize error if not. This is a special-case behavior
    1340             :          * needed for referential integrity updates in transaction-snapshot
    1341             :          * mode transactions.
    1342             :          */
    1343      131972 :         result = table_tuple_update(resultRelationDesc, tupleid, slot,
    1344             :                                     estate->es_output_cid,
    1345             :                                     estate->es_snapshot,
    1346             :                                     estate->es_crosscheck_snapshot,
    1347             :                                     true /* wait for commit */ ,
    1348             :                                     &tmfd, &lockmode, &update_indexes);
    1349             : 
    1350      131948 :         switch (result)
    1351             :         {
    1352          56 :             case TM_SelfModified:
    1353             : 
    1354             :                 /*
    1355             :                  * The target tuple was already updated or deleted by the
    1356             :                  * current command, or by a later command in the current
    1357             :                  * transaction.  The former case is possible in a join UPDATE
    1358             :                  * where multiple tuples join to the same target tuple. This
    1359             :                  * is pretty questionable, but Postgres has always allowed it:
    1360             :                  * we just execute the first update action and ignore
    1361             :                  * additional update attempts.
    1362             :                  *
    1363             :                  * The latter case arises if the tuple is modified by a
    1364             :                  * command in a BEFORE trigger, or perhaps by a command in a
    1365             :                  * volatile function used in the query.  In such situations we
    1366             :                  * should not ignore the update, but it is equally unsafe to
    1367             :                  * proceed.  We don't want to discard the original UPDATE
    1368             :                  * while keeping the triggered actions based on it; and we
    1369             :                  * have no principled way to merge this update with the
    1370             :                  * previous ones.  So throwing an error is the only safe
    1371             :                  * course.
    1372             :                  *
    1373             :                  * If a trigger actually intends this type of interaction, it
    1374             :                  * can re-execute the UPDATE (assuming it can figure out how)
    1375             :                  * and then return NULL to cancel the outer update.
    1376             :                  */
    1377          56 :                 if (tmfd.cmax != estate->es_output_cid)
    1378           4 :                     ereport(ERROR,
    1379             :                             (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
    1380             :                              errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
    1381             :                              errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
    1382             : 
    1383             :                 /* Else, already updated by self; nothing to do */
    1384          52 :                 return NULL;
    1385             : 
    1386      131778 :             case TM_Ok:
    1387      131778 :                 break;
    1388             : 
    1389         110 :             case TM_Updated:
    1390             :                 {
    1391             :                     TupleTableSlot *inputslot;
    1392             :                     TupleTableSlot *epqslot;
    1393             : 
    1394         110 :                     if (IsolationUsesXactSnapshot())
    1395           0 :                         ereport(ERROR,
    1396             :                                 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
    1397             :                                  errmsg("could not serialize access due to concurrent update")));
    1398             : 
    1399             :                     /*
    1400             :                      * Already know that we're going to need to do EPQ, so
    1401             :                      * fetch tuple directly into the right slot.
    1402             :                      */
    1403         110 :                     inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
    1404             :                                                  resultRelInfo->ri_RangeTableIndex);
    1405             : 
    1406         110 :                     result = table_tuple_lock(resultRelationDesc, tupleid,
    1407             :                                               estate->es_snapshot,
    1408             :                                               inputslot, estate->es_output_cid,
    1409             :                                               lockmode, LockWaitBlock,
    1410             :                                               TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
    1411             :                                               &tmfd);
    1412             : 
    1413         106 :                     switch (result)
    1414             :                     {
    1415          98 :                         case TM_Ok:
    1416             :                             Assert(tmfd.traversed);
    1417             : 
    1418          98 :                             epqslot = EvalPlanQual(epqstate,
    1419             :                                                    resultRelationDesc,
    1420             :                                                    resultRelInfo->ri_RangeTableIndex,
    1421             :                                                    inputslot);
    1422          98 :                             if (TupIsNull(epqslot))
    1423             :                                 /* Tuple not passing quals anymore, exiting... */
    1424           6 :                                 return NULL;
    1425             : 
    1426          92 :                             slot = ExecFilterJunk(resultRelInfo->ri_junkFilter, epqslot);
    1427          92 :                             goto lreplace;
    1428             : 
    1429           2 :                         case TM_Deleted:
    1430             :                             /* tuple already deleted; nothing to do */
    1431           2 :                             return NULL;
    1432             : 
    1433           6 :                         case TM_SelfModified:
    1434             : 
    1435             :                             /*
    1436             :                              * This can be reached when following an update
    1437             :                              * chain from a tuple updated by another session,
    1438             :                              * reaching a tuple that was already updated in
    1439             :                              * this transaction. If previously modified by
    1440             :                              * this command, ignore the redundant update,
    1441             :                              * otherwise error out.
    1442             :                              *
    1443             :                              * See also TM_SelfModified response to
    1444             :                              * table_tuple_update() above.
    1445             :                              */
    1446           6 :                             if (tmfd.cmax != estate->es_output_cid)
    1447           2 :                                 ereport(ERROR,
    1448             :                                         (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
    1449             :                                          errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
    1450             :                                          errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
    1451           4 :                             return NULL;
    1452             : 
    1453           0 :                         default:
    1454             :                             /* see table_tuple_lock call in ExecDelete() */
    1455           0 :                             elog(ERROR, "unexpected table_tuple_lock status: %u",
    1456             :                                  result);
    1457             :                             return NULL;
    1458             :                     }
    1459             :                 }
    1460             : 
    1461             :                 break;
    1462             : 
    1463           4 :             case TM_Deleted:
    1464           4 :                 if (IsolationUsesXactSnapshot())
    1465           0 :                     ereport(ERROR,
    1466             :                             (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
    1467             :                              errmsg("could not serialize access due to concurrent delete")));
    1468             :                 /* tuple already deleted; nothing to do */
    1469           4 :                 return NULL;
    1470             : 
    1471           0 :             default:
    1472           0 :                 elog(ERROR, "unrecognized table_tuple_update status: %u",
    1473             :                      result);
    1474             :                 return NULL;
    1475             :         }
    1476             : 
    1477             :         /* insert index entries for tuple if necessary */
    1478      131778 :         if (resultRelInfo->ri_NumIndices > 0 && update_indexes)
    1479       73242 :             recheckIndexes = ExecInsertIndexTuples(slot, estate, false, NULL, NIL);
    1480             :     }
    1481             : 
    1482      131924 :     if (canSetTag)
    1483      131528 :         (estate->es_processed)++;
    1484             : 
    1485             :     /* AFTER ROW UPDATE Triggers */
    1486      131924 :     ExecARUpdateTriggers(estate, resultRelInfo, tupleid, oldtuple, slot,
    1487             :                          recheckIndexes,
    1488      131924 :                          mtstate->operation == CMD_INSERT ?
    1489             :                          mtstate->mt_oc_transition_capture :
    1490             :                          mtstate->mt_transition_capture);
    1491             : 
    1492      131924 :     list_free(recheckIndexes);
    1493             : 
    1494             :     /*
    1495             :      * Check any WITH CHECK OPTION constraints from parent views.  We are
    1496             :      * required to do this after testing all constraints and uniqueness
    1497             :      * violations per the SQL spec, so we do it after actually updating the
    1498             :      * record in the heap and all indexes.
    1499             :      *
    1500             :      * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
    1501             :      * are looking for at this point.
    1502             :      */
    1503      131924 :     if (resultRelInfo->ri_WithCheckOptions != NIL)
    1504         272 :         ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
    1505             : 
    1506             :     /* Process RETURNING if present */
    1507      131880 :     if (resultRelInfo->ri_projectReturning)
    1508        1632 :         return ExecProcessReturning(resultRelInfo, slot, planSlot);
    1509             : 
    1510      130248 :     return NULL;
    1511             : }
    1512             : 
    1513             : /*
    1514             :  * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
    1515             :  *
    1516             :  * Try to lock tuple for update as part of speculative insertion.  If
    1517             :  * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
    1518             :  * (but still lock row, even though it may not satisfy estate's
    1519             :  * snapshot).
    1520             :  *
    1521             :  * Returns true if we're done (with or without an update), or false if
    1522             :  * the caller must retry the INSERT from scratch.
    1523             :  */
    1524             : static bool
    1525        4942 : ExecOnConflictUpdate(ModifyTableState *mtstate,
    1526             :                      ResultRelInfo *resultRelInfo,
    1527             :                      ItemPointer conflictTid,
    1528             :                      TupleTableSlot *planSlot,
    1529             :                      TupleTableSlot *excludedSlot,
    1530             :                      EState *estate,
    1531             :                      bool canSetTag,
    1532             :                      TupleTableSlot **returning)
    1533             : {
    1534        4942 :     ExprContext *econtext = mtstate->ps.ps_ExprContext;
    1535        4942 :     Relation    relation = resultRelInfo->ri_RelationDesc;
    1536        4942 :     ExprState  *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
    1537        4942 :     TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
    1538             :     TM_FailureData tmfd;
    1539             :     LockTupleMode lockmode;
    1540             :     TM_Result   test;
    1541             :     Datum       xminDatum;
    1542             :     TransactionId xmin;
    1543             :     bool        isnull;
    1544             : 
    1545             :     /* Determine lock mode to use */
    1546        4942 :     lockmode = ExecUpdateLockMode(estate, resultRelInfo);
    1547             : 
    1548             :     /*
    1549             :      * Lock tuple for update.  Don't follow updates when tuple cannot be
    1550             :      * locked without doing so.  A row locking conflict here means our
    1551             :      * previous conclusion that the tuple is conclusively committed is not
    1552             :      * true anymore.
    1553             :      */
    1554        4942 :     test = table_tuple_lock(relation, conflictTid,
    1555             :                             estate->es_snapshot,
    1556             :                             existing, estate->es_output_cid,
    1557             :                             lockmode, LockWaitBlock, 0,
    1558             :                             &tmfd);
    1559        4942 :     switch (test)
    1560             :     {
    1561        4926 :         case TM_Ok:
    1562             :             /* success! */
    1563        4926 :             break;
    1564             : 
    1565          16 :         case TM_Invisible:
    1566             : 
    1567             :             /*
    1568             :              * This can occur when a just inserted tuple is updated again in
    1569             :              * the same command. E.g. because multiple rows with the same
    1570             :              * conflicting key values are inserted.
    1571             :              *
    1572             :              * This is somewhat similar to the ExecUpdate() TM_SelfModified
    1573             :              * case.  We do not want to proceed because it would lead to the
    1574             :              * same row being updated a second time in some unspecified order,
    1575             :              * and in contrast to plain UPDATEs there's no historical behavior
    1576             :              * to break.
    1577             :              *
    1578             :              * It is the user's responsibility to prevent this situation from
    1579             :              * occurring.  These problems are why SQL-2003 similarly specifies
    1580             :              * that for SQL MERGE, an exception must be raised in the event of
    1581             :              * an attempt to update the same row twice.
    1582             :              */
    1583          16 :             xminDatum = slot_getsysattr(existing,
    1584             :                                         MinTransactionIdAttributeNumber,
    1585             :                                         &isnull);
    1586             :             Assert(!isnull);
    1587          16 :             xmin = DatumGetTransactionId(xminDatum);
    1588             : 
    1589          16 :             if (TransactionIdIsCurrentTransactionId(xmin))
    1590          16 :                 ereport(ERROR,
    1591             :                         (errcode(ERRCODE_CARDINALITY_VIOLATION),
    1592             :                          errmsg("ON CONFLICT DO UPDATE command cannot affect row a second time"),
    1593             :                          errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
    1594             : 
    1595             :             /* This shouldn't happen */
    1596           0 :             elog(ERROR, "attempted to lock invisible tuple");
    1597             :             break;
    1598             : 
    1599           0 :         case TM_SelfModified:
    1600             : 
    1601             :             /*
    1602             :              * This state should never be reached. As a dirty snapshot is used
    1603             :              * to find conflicting tuples, speculative insertion wouldn't have
    1604             :              * seen this row to conflict with.
    1605             :              */
    1606           0 :             elog(ERROR, "unexpected self-updated tuple");
    1607             :             break;
    1608             : 
    1609           0 :         case TM_Updated:
    1610           0 :             if (IsolationUsesXactSnapshot())
    1611           0 :                 ereport(ERROR,
    1612             :                         (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
    1613             :                          errmsg("could not serialize access due to concurrent update")));
    1614             : 
    1615             :             /*
    1616             :              * As long as we don't support an UPDATE of INSERT ON CONFLICT for
    1617             :              * a partitioned table we shouldn't reach to a case where tuple to
    1618             :              * be lock is moved to another partition due to concurrent update
    1619             :              * of the partition key.
    1620             :              */
    1621             :             Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
    1622             : 
    1623             :             /*
    1624             :              * Tell caller to try again from the very start.
    1625             :              *
    1626             :              * It does not make sense to use the usual EvalPlanQual() style
    1627             :              * loop here, as the new version of the row might not conflict
    1628             :              * anymore, or the conflicting tuple has actually been deleted.
    1629             :              */
    1630           0 :             ExecClearTuple(existing);
    1631           0 :             return false;
    1632             : 
    1633           0 :         case TM_Deleted:
    1634           0 :             if (IsolationUsesXactSnapshot())
    1635           0 :                 ereport(ERROR,
    1636             :                         (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
    1637             :                          errmsg("could not serialize access due to concurrent delete")));
    1638             : 
    1639             :             /* see TM_Updated case */
    1640             :             Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
    1641           0 :             ExecClearTuple(existing);
    1642           0 :             return false;
    1643             : 
    1644           0 :         default:
    1645           0 :             elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
    1646             :     }
    1647             : 
    1648             :     /* Success, the tuple is locked. */
    1649             : 
    1650             :     /*
    1651             :      * Verify that the tuple is visible to our MVCC snapshot if the current
    1652             :      * isolation level mandates that.
    1653             :      *
    1654             :      * It's not sufficient to rely on the check within ExecUpdate() as e.g.
    1655             :      * CONFLICT ... WHERE clause may prevent us from reaching that.
    1656             :      *
    1657             :      * This means we only ever continue when a new command in the current
    1658             :      * transaction could see the row, even though in READ COMMITTED mode the
    1659             :      * tuple will not be visible according to the current statement's
    1660             :      * snapshot.  This is in line with the way UPDATE deals with newer tuple
    1661             :      * versions.
    1662             :      */
    1663        4926 :     ExecCheckTupleVisible(estate, relation, existing);
    1664             : 
    1665             :     /*
    1666             :      * Make tuple and any needed join variables available to ExecQual and
    1667             :      * ExecProject.  The EXCLUDED tuple is installed in ecxt_innertuple, while
    1668             :      * the target's existing tuple is installed in the scantuple.  EXCLUDED
    1669             :      * has been made to reference INNER_VAR in setrefs.c, but there is no
    1670             :      * other redirection.
    1671             :      */
    1672        4926 :     econtext->ecxt_scantuple = existing;
    1673        4926 :     econtext->ecxt_innertuple = excludedSlot;
    1674        4926 :     econtext->ecxt_outertuple = NULL;
    1675             : 
    1676        4926 :     if (!ExecQual(onConflictSetWhere, econtext))
    1677             :     {
    1678          22 :         ExecClearTuple(existing);   /* see return below */
    1679          22 :         InstrCountFiltered1(&mtstate->ps, 1);
    1680          22 :         return true;            /* done with the tuple */
    1681             :     }
    1682             : 
    1683        4904 :     if (resultRelInfo->ri_WithCheckOptions != NIL)
    1684             :     {
    1685             :         /*
    1686             :          * Check target's existing tuple against UPDATE-applicable USING
    1687             :          * security barrier quals (if any), enforced here as RLS checks/WCOs.
    1688             :          *
    1689             :          * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
    1690             :          * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
    1691             :          * but that's almost the extent of its special handling for ON
    1692             :          * CONFLICT DO UPDATE.
    1693             :          *
    1694             :          * The rewriter will also have associated UPDATE applicable straight
    1695             :          * RLS checks/WCOs for the benefit of the ExecUpdate() call that
    1696             :          * follows.  INSERTs and UPDATEs naturally have mutually exclusive WCO
    1697             :          * kinds, so there is no danger of spurious over-enforcement in the
    1698             :          * INSERT or UPDATE path.
    1699             :          */
    1700          40 :         ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
    1701             :                              existing,
    1702             :                              mtstate->ps.state);
    1703             :     }
    1704             : 
    1705             :     /* Project the new tuple version */
    1706        4888 :     ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
    1707             : 
    1708             :     /*
    1709             :      * Note that it is possible that the target tuple has been modified in
    1710             :      * this session, after the above table_tuple_lock. We choose to not error
    1711             :      * out in that case, in line with ExecUpdate's treatment of similar cases.
    1712             :      * This can happen if an UPDATE is triggered from within ExecQual(),
    1713             :      * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
    1714             :      * wCTE in the ON CONFLICT's SET.
    1715             :      */
    1716             : 
    1717             :     /* Execute UPDATE with projection */
    1718       14644 :     *returning = ExecUpdate(mtstate, conflictTid, NULL,
    1719        4888 :                             resultRelInfo->ri_onConflict->oc_ProjSlot,
    1720             :                             planSlot,
    1721             :                             &mtstate->mt_epqstate, mtstate->ps.state,
    1722             :                             canSetTag);
    1723             : 
    1724             :     /*
    1725             :      * Clear out existing tuple, as there might not be another conflict among
    1726             :      * the next input rows. Don't want to hold resources till the end of the
    1727             :      * query.
    1728             :      */
    1729        4868 :     ExecClearTuple(existing);
    1730        4868 :     return true;
    1731             : }
    1732             : 
    1733             : 
    1734             : /*
    1735             :  * Process BEFORE EACH STATEMENT triggers
    1736             :  */
    1737             : static void
    1738       87426 : fireBSTriggers(ModifyTableState *node)
    1739             : {
    1740       87426 :     ModifyTable *plan = (ModifyTable *) node->ps.plan;
    1741       87426 :     ResultRelInfo *resultRelInfo = node->resultRelInfo;
    1742             : 
    1743             :     /*
    1744             :      * If the node modifies a partitioned table, we must fire its triggers.
    1745             :      * Note that in that case, node->resultRelInfo points to the first leaf
    1746             :      * partition, not the root table.
    1747             :      */
    1748       87426 :     if (node->rootResultRelInfo != NULL)
    1749        2636 :         resultRelInfo = node->rootResultRelInfo;
    1750             : 
    1751       87426 :     switch (node->operation)
    1752             :     {
    1753       69112 :         case CMD_INSERT:
    1754       69112 :             ExecBSInsertTriggers(node->ps.state, resultRelInfo);
    1755       69104 :             if (plan->onConflictAction == ONCONFLICT_UPDATE)
    1756         552 :                 ExecBSUpdateTriggers(node->ps.state,
    1757             :                                      resultRelInfo);
    1758       69104 :             break;
    1759       10028 :         case CMD_UPDATE:
    1760       10028 :             ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
    1761       10028 :             break;
    1762        8286 :         case CMD_DELETE:
    1763        8286 :             ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
    1764        8286 :             break;
    1765           0 :         default:
    1766           0 :             elog(ERROR, "unknown operation");
    1767             :             break;
    1768             :     }
    1769       87418 : }
    1770             : 
    1771             : /*
    1772             :  * Return the target rel ResultRelInfo.
    1773             :  *
    1774             :  * This relation is the same as :
    1775             :  * - the relation for which we will fire AFTER STATEMENT triggers.
    1776             :  * - the relation into whose tuple format all captured transition tuples must
    1777             :  *   be converted.
    1778             :  * - the root partitioned table.
    1779             :  */
    1780             : static ResultRelInfo *
    1781      261694 : getTargetResultRelInfo(ModifyTableState *node)
    1782             : {
    1783             :     /*
    1784             :      * Note that if the node modifies a partitioned table, node->resultRelInfo
    1785             :      * points to the first leaf partition, not the root table.
    1786             :      */
    1787      261694 :     if (node->rootResultRelInfo != NULL)
    1788        8184 :         return node->rootResultRelInfo;
    1789             :     else
    1790      253510 :         return node->resultRelInfo;
    1791             : }
    1792             : 
    1793             : /*
    1794             :  * Process AFTER EACH STATEMENT triggers
    1795             :  */
    1796             : static void
    1797       85594 : fireASTriggers(ModifyTableState *node)
    1798             : {
    1799       85594 :     ModifyTable *plan = (ModifyTable *) node->ps.plan;
    1800       85594 :     ResultRelInfo *resultRelInfo = getTargetResultRelInfo(node);
    1801             : 
    1802       85594 :     switch (node->operation)
    1803             :     {
    1804       67712 :         case CMD_INSERT:
    1805       67712 :             if (plan->onConflictAction == ONCONFLICT_UPDATE)
    1806         484 :                 ExecASUpdateTriggers(node->ps.state,
    1807             :                                      resultRelInfo,
    1808         484 :                                      node->mt_oc_transition_capture);
    1809       67712 :             ExecASInsertTriggers(node->ps.state, resultRelInfo,
    1810       67712 :                                  node->mt_transition_capture);
    1811       67712 :             break;
    1812        9692 :         case CMD_UPDATE:
    1813        9692 :             ExecASUpdateTriggers(node->ps.state, resultRelInfo,
    1814        9692 :                                  node->mt_transition_capture);
    1815        9692 :             break;
    1816        8190 :         case CMD_DELETE:
    1817        8190 :             ExecASDeleteTriggers(node->ps.state, resultRelInfo,
    1818        8190 :                                  node->mt_transition_capture);
    1819        8190 :             break;
    1820           0 :         default:
    1821           0 :             elog(ERROR, "unknown operation");
    1822             :             break;
    1823             :     }
    1824       85594 : }
    1825             : 
    1826             : /*
    1827             :  * Set up the state needed for collecting transition tuples for AFTER
    1828             :  * triggers.
    1829             :  */
    1830             : static void
    1831       87450 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
    1832             : {
    1833       87450 :     ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
    1834       87450 :     ResultRelInfo *targetRelInfo = getTargetResultRelInfo(mtstate);
    1835             : 
    1836             :     /* Check for transition tables on the directly targeted relation. */
    1837       87450 :     mtstate->mt_transition_capture =
    1838      174900 :         MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
    1839       87450 :                                    RelationGetRelid(targetRelInfo->ri_RelationDesc),
    1840             :                                    mtstate->operation);
    1841       87450 :     if (plan->operation == CMD_INSERT &&
    1842       69112 :         plan->onConflictAction == ONCONFLICT_UPDATE)
    1843         552 :         mtstate->mt_oc_transition_capture =
    1844         552 :             MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
    1845         552 :                                        RelationGetRelid(targetRelInfo->ri_RelationDesc),
    1846             :                                        CMD_UPDATE);
    1847             : 
    1848             :     /*
    1849             :      * If we found that we need to collect transition tuples then we may also
    1850             :      * need tuple conversion maps for any children that have TupleDescs that
    1851             :      * aren't compatible with the tuplestores.  (We can share these maps
    1852             :      * between the regular and ON CONFLICT cases.)
    1853             :      */
    1854       87450 :     if (mtstate->mt_transition_capture != NULL ||
    1855       87168 :         mtstate->mt_oc_transition_capture != NULL)
    1856             :     {
    1857         286 :         ExecSetupChildParentMapForSubplan(mtstate);
    1858             : 
    1859             :         /*
    1860             :          * Install the conversion map for the first plan for UPDATE and DELETE
    1861             :          * operations.  It will be advanced each time we switch to the next
    1862             :          * plan.  (INSERT operations set it every time, so we need not update
    1863             :          * mtstate->mt_oc_transition_capture here.)
    1864             :          */
    1865         286 :         if (mtstate->mt_transition_capture && mtstate->operation != CMD_INSERT)
    1866         170 :             mtstate->mt_transition_capture->tcs_map =
    1867         170 :                 tupconv_map_for_subplan(mtstate, 0);
    1868             :     }
    1869       87450 : }
    1870             : 
    1871             : /*
    1872             :  * ExecPrepareTupleRouting --- prepare for routing one tuple
    1873             :  *
    1874             :  * Determine the partition in which the tuple in slot is to be inserted,
    1875             :  * and modify mtstate and estate to prepare for it.
    1876             :  *
    1877             :  * Caller must revert the estate changes after executing the insertion!
    1878             :  * In mtstate, transition capture changes may also need to be reverted.
    1879             :  *
    1880             :  * Returns a slot holding the tuple of the partition rowtype.
    1881             :  */
    1882             : static TupleTableSlot *
    1883      504332 : ExecPrepareTupleRouting(ModifyTableState *mtstate,
    1884             :                         EState *estate,
    1885             :                         PartitionTupleRouting *proute,
    1886             :                         ResultRelInfo *targetRelInfo,
    1887             :                         TupleTableSlot *slot)
    1888             : {
    1889             :     ResultRelInfo *partrel;
    1890             :     PartitionRoutingInfo *partrouteinfo;
    1891             :     TupleConversionMap *map;
    1892             : 
    1893             :     /*
    1894             :      * Lookup the target partition's ResultRelInfo.  If ExecFindPartition does
    1895             :      * not find a valid partition for the tuple in 'slot' then an error is
    1896             :      * raised.  An error may also be raised if the found partition is not a
    1897             :      * valid target for INSERTs.  This is required since a partitioned table
    1898             :      * UPDATE to another partition becomes a DELETE+INSERT.
    1899             :      */
    1900      504332 :     partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
    1901      504232 :     partrouteinfo = partrel->ri_PartitionInfo;
    1902             :     Assert(partrouteinfo != NULL);
    1903             : 
    1904             :     /*
    1905             :      * Make it look like we are inserting into the partition.
    1906             :      */
    1907      504232 :     estate->es_result_relation_info = partrel;
    1908             : 
    1909             :     /*
    1910             :      * If we're capturing transition tuples, we might need to convert from the
    1911             :      * partition rowtype to root partitioned table's rowtype.
    1912             :      */
    1913      504232 :     if (mtstate->mt_transition_capture != NULL)
    1914             :     {
    1915          84 :         if (partrel->ri_TrigDesc &&
    1916          28 :             partrel->ri_TrigDesc->trig_insert_before_row)
    1917             :         {
    1918             :             /*
    1919             :              * If there are any BEFORE triggers on the partition, we'll have
    1920             :              * to be ready to convert their result back to tuplestore format.
    1921             :              */
    1922          16 :             mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
    1923          32 :             mtstate->mt_transition_capture->tcs_map =
    1924          16 :                 partrouteinfo->pi_PartitionToRootMap;
    1925             :         }
    1926             :         else
    1927             :         {
    1928             :             /*
    1929             :              * Otherwise, just remember the original unconverted tuple, to
    1930             :              * avoid a needless round trip conversion.
    1931             :              */
    1932          68 :             mtstate->mt_transition_capture->tcs_original_insert_tuple = slot;
    1933          68 :             mtstate->mt_transition_capture->tcs_map = NULL;
    1934             :         }
    1935             :     }
    1936      504232 :     if (mtstate->mt_oc_transition_capture != NULL)
    1937             :     {
    1938          72 :         mtstate->mt_oc_transition_capture->tcs_map =
    1939          36 :             partrouteinfo->pi_PartitionToRootMap;
    1940             :     }
    1941             : 
    1942             :     /*
    1943             :      * Convert the tuple, if necessary.
    1944             :      */
    1945      504232 :     map = partrouteinfo->pi_RootToPartitionMap;
    1946      504232 :     if (map != NULL)
    1947             :     {
    1948       45342 :         TupleTableSlot *new_slot = partrouteinfo->pi_PartitionTupleSlot;
    1949             : 
    1950       45342 :         slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
    1951             :     }
    1952             : 
    1953      504232 :     return slot;
    1954             : }
    1955             : 
    1956             : /*
    1957             :  * Initialize the child-to-root tuple conversion map array for UPDATE subplans.
    1958             :  *
    1959             :  * This map array is required to convert the tuple from the subplan result rel
    1960             :  * to the target table descriptor. This requirement arises for two independent
    1961             :  * scenarios:
    1962             :  * 1. For update-tuple-routing.
    1963             :  * 2. For capturing tuples in transition tables.
    1964             :  */
    1965             : static void
    1966         752 : ExecSetupChildParentMapForSubplan(ModifyTableState *mtstate)
    1967             : {
    1968         752 :     ResultRelInfo *targetRelInfo = getTargetResultRelInfo(mtstate);
    1969         752 :     ResultRelInfo *resultRelInfos = mtstate->resultRelInfo;
    1970             :     TupleDesc   outdesc;
    1971         752 :     int         numResultRelInfos = mtstate->mt_nplans;
    1972             :     int         i;
    1973             : 
    1974             :     /*
    1975             :      * Build array of conversion maps from each child's TupleDesc to the one
    1976             :      * used in the target relation.  The map pointers may be NULL when no
    1977             :      * conversion is necessary, which is hopefully a common case.
    1978             :      */
    1979             : 
    1980             :     /* Get tuple descriptor of the target rel. */
    1981         752 :     outdesc = RelationGetDescr(targetRelInfo->ri_RelationDesc);
    1982             : 
    1983         752 :     mtstate->mt_per_subplan_tupconv_maps = (TupleConversionMap **)
    1984         752 :         palloc(sizeof(TupleConversionMap *) * numResultRelInfos);
    1985             : 
    1986        2144 :     for (i = 0; i < numResultRelInfos; ++i)
    1987             :     {
    1988        1392 :         mtstate->mt_per_subplan_tupconv_maps[i] =
    1989        1392 :             convert_tuples_by_name(RelationGetDescr(resultRelInfos[i].ri_RelationDesc),
    1990             :                                    outdesc);
    1991             :     }
    1992         752 : }
    1993             : 
    1994             : /*
    1995             :  * For a given subplan index, get the tuple conversion map.
    1996             :  */
    1997             : static TupleConversionMap *
    1998         646 : tupconv_map_for_subplan(ModifyTableState *mtstate, int whichplan)
    1999             : {
    2000             :     /* If nobody else set the per-subplan array of maps, do so ourselves. */
    2001         646 :     if (mtstate->mt_per_subplan_tupconv_maps == NULL)
    2002           0 :         ExecSetupChildParentMapForSubplan(mtstate);
    2003             : 
    2004             :     Assert(whichplan >= 0 && whichplan < mtstate->mt_nplans);
    2005         646 :     return mtstate->mt_per_subplan_tupconv_maps[whichplan];
    2006             : }
    2007             : 
    2008             : /* ----------------------------------------------------------------
    2009             :  *     ExecModifyTable
    2010             :  *
    2011             :  *      Perform table modifications as required, and return RETURNING results
    2012             :  *      if needed.
    2013             :  * ----------------------------------------------------------------
    2014             :  */
    2015             : static TupleTableSlot *
    2016       92256 : ExecModifyTable(PlanState *pstate)
    2017             : {
    2018       92256 :     ModifyTableState *node = castNode(ModifyTableState, pstate);
    2019       92256 :     PartitionTupleRouting *proute = node->mt_partition_tuple_routing;
    2020       92256 :     EState     *estate = node->ps.state;
    2021       92256 :     CmdType     operation = node->operation;
    2022             :     ResultRelInfo *saved_resultRelInfo;
    2023             :     ResultRelInfo *resultRelInfo;
    2024             :     PlanState  *subplanstate;
    2025             :     JunkFilter *junkfilter;
    2026             :     TupleTableSlot *slot;
    2027             :     TupleTableSlot *planSlot;
    2028             :     ItemPointer tupleid;
    2029             :     ItemPointerData tuple_ctid;
    2030             :     HeapTupleData oldtupdata;
    2031             :     HeapTuple   oldtuple;
    2032             : 
    2033       92256 :     CHECK_FOR_INTERRUPTS();
    2034             : 
    2035             :     /*
    2036             :      * This should NOT get called during EvalPlanQual; we should have passed a
    2037             :      * subplan tree to EvalPlanQual, instead.  Use a runtime test not just
    2038             :      * Assert because this condition is easy to miss in testing.  (Note:
    2039             :      * although ModifyTable should not get executed within an EvalPlanQual
    2040             :      * operation, we do have to allow it to be initialized and shut down in
    2041             :      * case it is within a CTE subplan.  Hence this test must be here, not in
    2042             :      * ExecInitModifyTable.)
    2043             :      */
    2044       92256 :     if (estate->es_epq_active != NULL)
    2045           0 :         elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
    2046             : 
    2047             :     /*
    2048             :      * If we've already completed processing, don't try to do more.  We need
    2049             :      * this test because ExecPostprocessPlan might call us an extra time, and
    2050             :      * our subplan's nodes aren't necessarily robust against being called
    2051             :      * extra times.
    2052             :      */
    2053       92256 :     if (node->mt_done)
    2054         484 :         return NULL;
    2055             : 
    2056             :     /*
    2057             :      * On first call, fire BEFORE STATEMENT triggers before proceeding.
    2058             :      */
    2059       91772 :     if (node->fireBSTriggers)
    2060             :     {
    2061       87426 :         fireBSTriggers(node);
    2062       87418 :         node->fireBSTriggers = false;
    2063             :     }
    2064             : 
    2065             :     /* Preload local variables */
    2066       91764 :     resultRelInfo = node->resultRelInfo + node->mt_whichplan;
    2067       91764 :     subplanstate = node->mt_plans[node->mt_whichplan];
    2068       91764 :     junkfilter = resultRelInfo->ri_junkFilter;
    2069             : 
    2070             :     /*
    2071             :      * es_result_relation_info must point to the currently active result
    2072             :      * relation while we are within this ModifyTable node.  Even though
    2073             :      * ModifyTable nodes can't be nested statically, they can be nested
    2074             :      * dynamically (since our subplan could include a reference to a modifying
    2075             :      * CTE).  So we have to save and restore the caller's value.
    2076             :      */
    2077       91764 :     saved_resultRelInfo = estate->es_result_relation_info;
    2078             : 
    2079       91764 :     estate->es_result_relation_info = resultRelInfo;
    2080             : 
    2081             :     /*
    2082             :      * Fetch rows from subplan(s), and execute the required table modification
    2083             :      * for each row.
    2084             :      */
    2085             :     for (;;)
    2086             :     {
    2087             :         /*
    2088             :          * Reset the per-output-tuple exprcontext.  This is needed because
    2089             :          * triggers expect to use that context as workspace.  It's a bit ugly
    2090             :          * to do this below the top level of the plan, however.  We might need
    2091             :          * to rethink this later.
    2092             :          */
    2093    11917888 :         ResetPerTupleExprContext(estate);
    2094             : 
    2095             :         /*
    2096             :          * Reset per-tuple memory context used for processing on conflict and
    2097             :          * returning clauses, to free any expression evaluation storage
    2098             :          * allocated in the previous cycle.
    2099             :          */
    2100    11917888 :         if (pstate->ps_ExprContext)
    2101       15250 :             ResetExprContext(pstate->ps_ExprContext);
    2102             : 
    2103    11917888 :         planSlot = ExecProcNode(subplanstate);
    2104             : 
    2105    11917702 :         if (TupIsNull(planSlot))
    2106             :         {
    2107             :             /* advance to next subplan if any */
    2108       86532 :             node->mt_whichplan++;
    2109       86532 :             if (node->mt_whichplan < node->mt_nplans)
    2110             :             {
    2111         938 :                 resultRelInfo++;
    2112         938 :                 subplanstate = node->mt_plans[node->mt_whichplan];
    2113         938 :                 junkfilter = resultRelInfo->ri_junkFilter;
    2114         938 :                 estate->es_result_relation_info = resultRelInfo;
    2115         938 :                 EvalPlanQualSetPlan(&node->mt_epqstate, subplanstate->plan,
    2116         938 :                                     node->mt_arowmarks[node->mt_whichplan]);
    2117             :                 /* Prepare to convert transition tuples from this child. */
    2118         938 :                 if (node->mt_transition_capture != NULL)
    2119             :                 {
    2120         132 :                     node->mt_transition_capture->tcs_map =
    2121         132 :                         tupconv_map_for_subplan(node, node->mt_whichplan);
    2122             :                 }
    2123         938 :                 if (node->mt_oc_transition_capture != NULL)
    2124             :                 {
    2125           0 :                     node->mt_oc_transition_capture->tcs_map =
    2126           0 :                         tupconv_map_for_subplan(node, node->mt_whichplan);
    2127             :                 }
    2128         938 :                 continue;
    2129             :             }
    2130             :             else
    2131       85594 :                 break;
    2132             :         }
    2133             : 
    2134             :         /*
    2135             :          * Ensure input tuple is the right format for the target relation.
    2136             :          */
    2137    11831170 :         if (node->mt_scans[node->mt_whichplan]->tts_ops != planSlot->tts_ops)
    2138             :         {
    2139    11254916 :             ExecCopySlot(node->mt_scans[node->mt_whichplan], planSlot);
    2140    11254916 :             planSlot = node->mt_scans[node->mt_whichplan];
    2141             :         }
    2142             : 
    2143             :         /*
    2144             :          * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
    2145             :          * here is compute the RETURNING expressions.
    2146             :          */
    2147    11831170 :         if (resultRelInfo->ri_usesFdwDirectModify)
    2148             :         {
    2149             :             Assert(resultRelInfo->ri_projectReturning);
    2150             : 
    2151             :             /*
    2152             :              * A scan slot containing the data that was actually inserted,
    2153             :              * updated or deleted has already been made available to
    2154             :              * ExecProcessReturning by IterateDirectModify, so no need to
    2155             :              * provide it here.
    2156             :              */
    2157         692 :             slot = ExecProcessReturning(resultRelInfo, NULL, planSlot);
    2158             : 
    2159         692 :             estate->es_result_relation_info = saved_resultRelInfo;
    2160         692 :             return slot;
    2161             :         }
    2162             : 
    2163    11830478 :         EvalPlanQualSetSlot(&node->mt_epqstate, planSlot);
    2164    11830478 :         slot = planSlot;
    2165             : 
    2166    11830478 :         tupleid = NULL;
    2167    11830478 :         oldtuple = NULL;
    2168    11830478 :         if (junkfilter != NULL)
    2169             :         {
    2170             :             /*
    2171             :              * extract the 'ctid' or 'wholerow' junk attribute.
    2172             :              */
    2173     1004276 :             if (operation == CMD_UPDATE || operation == CMD_DELETE)
    2174             :             {
    2175             :                 char        relkind;
    2176             :                 Datum       datum;
    2177             :                 bool        isNull;
    2178             : 
    2179     1004276 :                 relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
    2180     1004276 :                 if (relkind == RELKIND_RELATION || relkind == RELKIND_MATVIEW)
    2181             :                 {
    2182     1004040 :                     datum = ExecGetJunkAttribute(slot,
    2183     1004040 :                                                  junkfilter->jf_junkAttNo,
    2184             :                                                  &isNull);
    2185             :                     /* shouldn't ever get a null result... */
    2186     1004040 :                     if (isNull)
    2187           0 :                         elog(ERROR, "ctid is NULL");
    2188             : 
    2189     1004040 :                     tupleid = (ItemPointer) DatumGetPointer(datum);
    2190     1004040 :                     tuple_ctid = *tupleid;  /* be sure we don't free ctid!! */
    2191     1004040 :                     tupleid = &tuple_ctid;
    2192             :                 }
    2193             : 
    2194             :                 /*
    2195             :                  * Use the wholerow attribute, when available, to reconstruct
    2196             :                  * the old relation tuple.
    2197             :                  *
    2198             :                  * Foreign table updates have a wholerow attribute when the
    2199             :                  * relation has a row-level trigger.  Note that the wholerow
    2200             :                  * attribute does not carry system columns.  Foreign table
    2201             :                  * triggers miss seeing those, except that we know enough here
    2202             :                  * to set t_tableOid.  Quite separately from this, the FDW may
    2203             :                  * fetch its own junk attrs to identify the row.
    2204             :                  *
    2205             :                  * Other relevant relkinds, currently limited to views, always
    2206             :                  * have a wholerow attribute.
    2207             :                  */
    2208         236 :                 else if (AttributeNumberIsValid(junkfilter->jf_junkAttNo))
    2209             :                 {
    2210         176 :                     datum = ExecGetJunkAttribute(slot,
    2211         176 :                                                  junkfilter->jf_junkAttNo,
    2212             :                                                  &isNull);
    2213             :                     /* shouldn't ever get a null result... */
    2214         176 :                     if (isNull)
    2215           0 :                         elog(ERROR, "wholerow is NULL");
    2216             : 
    2217         176 :                     oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
    2218         176 :                     oldtupdata.t_len =
    2219         176 :                         HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
    2220         176 :                     ItemPointerSetInvalid(&(oldtupdata.t_self));
    2221             :                     /* Historically, view triggers see invalid t_tableOid. */
    2222         176 :                     oldtupdata.t_tableOid =
    2223         176 :                         (relkind == RELKIND_VIEW) ? InvalidOid :
    2224          60 :                         RelationGetRelid(resultRelInfo->ri_RelationDesc);
    2225             : 
    2226         176 :                     oldtuple = &oldtupdata;
    2227             :                 }
    2228             :                 else
    2229             :                     Assert(relkind == RELKIND_FOREIGN_TABLE);
    2230             :             }
    2231             : 
    2232             :             /*
    2233             :              * apply the junkfilter if needed.
    2234             :              */
    2235     1004276 :             if (operation != CMD_DELETE)
    2236      127770 :                 slot = ExecFilterJunk(junkfilter, slot);
    2237             :         }
    2238             : 
    2239    11830478 :         switch (operation)
    2240             :         {
    2241    10826202 :             case CMD_INSERT:
    2242             :                 /* Prepare for tuple routing if needed. */
    2243    10826202 :                 if (proute)
    2244      503988 :                     slot = ExecPrepareTupleRouting(node, estate, proute,
    2245             :                                                    resultRelInfo, slot);
    2246    10826116 :                 slot = ExecInsert(node, slot, planSlot,
    2247    10826116 :                                   estate, node->canSetTag);
    2248             :                 /* Revert ExecPrepareTupleRouting's state change. */
    2249    10824898 :                 if (proute)
    2250      503760 :                     estate->es_result_relation_info = resultRelInfo;
    2251    10824898 :                 break;
    2252      127770 :             case CMD_UPDATE:
    2253      127770 :                 slot = ExecUpdate(node, tupleid, oldtuple, slot, planSlot,
    2254      127770 :                                   &node->mt_epqstate, estate, node->canSetTag);
    2255      127514 :                 break;
    2256      876506 :             case CMD_DELETE:
    2257      876506 :                 slot = ExecDelete(node, tupleid, oldtuple, planSlot,
    2258             :                                   &node->mt_epqstate, estate,
    2259      876506 :                                   true, node->canSetTag,
    2260             :                                   false /* changingPart */ , NULL, NULL);
    2261      876438 :                 break;
    2262           0 :             default:
    2263           0 :                 elog(ERROR, "unknown operation");
    2264             :                 break;
    2265             :         }
    2266             : 
    2267             :         /*
    2268             :          * If we got a RETURNING result, return it to caller.  We'll continue
    2269             :          * the work on next call.
    2270             :          */
    2271    11828850 :         if (slot)
    2272             :         {
    2273        3664 :             estate->es_result_relation_info = saved_resultRelInfo;
    2274        3664 :             return slot;
    2275             :         }
    2276             :     }
    2277             : 
    2278             :     /* Restore es_result_relation_info before exiting */
    2279       85594 :     estate->es_result_relation_info = saved_resultRelInfo;
    2280             : 
    2281             :     /*
    2282             :      * We're done, but fire AFTER STATEMENT triggers before exiting.
    2283             :      */
    2284       85594 :     fireASTriggers(node);
    2285             : 
    2286       85594 :     node->mt_done = true;
    2287             : 
    2288       85594 :     return NULL;
    2289             : }
    2290             : 
    2291             : /* ----------------------------------------------------------------
    2292             :  *      ExecInitModifyTable
    2293             :  * ----------------------------------------------------------------
    2294             :  */
    2295             : ModifyTableState *
    2296       87912 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
    2297             : {
    2298             :     ModifyTableState *mtstate;
    2299       87912 :     CmdType     operation = node->operation;
    2300       87912 :     int         nplans = list_length(node->plans);
    2301             :     ResultRelInfo *saved_resultRelInfo;
    2302             :     ResultRelInfo *resultRelInfo;
    2303             :     Plan       *subplan;
    2304             :     ListCell   *l;
    2305             :     int         i;
    2306             :     Relation    rel;
    2307       87912 :     bool        update_tuple_routing_needed = node->partColsUpdated;
    2308             : 
    2309             :     /* check for unsupported flags */
    2310             :     Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
    2311             : 
    2312             :     /*
    2313             :      * create state structure
    2314             :      */
    2315       87912 :     mtstate = makeNode(ModifyTableState);
    2316       87912 :     mtstate->ps.plan = (Plan *) node;
    2317       87912 :     mtstate->ps.state = estate;
    2318       87912 :     mtstate->ps.ExecProcNode = ExecModifyTable;
    2319             : 
    2320       87912 :     mtstate->operation = operation;
    2321       87912 :     mtstate->canSetTag = node->canSetTag;
    2322       87912 :     mtstate->mt_done = false;
    2323             : 
    2324       87912 :     mtstate->mt_plans = (PlanState **) palloc0(sizeof(PlanState *) * nplans);
    2325       87912 :     mtstate->resultRelInfo = estate->es_result_relations + node->resultRelIndex;
    2326       87912 :     mtstate->mt_scans = (TupleTableSlot **) palloc0(sizeof(TupleTableSlot *) * nplans);
    2327             : 
    2328             :     /* If modifying a partitioned table, initialize the root table info */
    2329       87912 :     if (node->rootResultRelIndex >= 0)
    2330        5412 :         mtstate->rootResultRelInfo = estate->es_root_result_relations +
    2331        2706 :             node->rootResultRelIndex;
    2332             : 
    2333       87912 :     mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans);
    2334       87912 :     mtstate->mt_nplans = nplans;
    2335             : 
    2336             :     /* set up epqstate with dummy subplan data for the moment */
    2337       87912 :     EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam);
    2338       87912 :     mtstate->fireBSTriggers = true;
    2339             : 
    2340             :     /*
    2341             :      * call ExecInitNode on each of the plans to be executed and save the
    2342             :      * results into the array "mt_plans".  This is also a convenient place to
    2343             :      * verify that the proposed target relations are valid and open their
    2344             :      * indexes for insertion of new index entries.  Note we *must* set
    2345             :      * estate->es_result_relation_info correctly while we initialize each
    2346             :      * sub-plan; external modules such as FDWs may depend on that (see
    2347             :      * contrib/postgres_fdw/postgres_fdw.c: postgresBeginDirectModify() as one
    2348             :      * example).
    2349             :      */
    2350       87912 :     saved_resultRelInfo = estate->es_result_relation_info;
    2351             : 
    2352       87912 :     resultRelInfo = mtstate->resultRelInfo;
    2353       87912 :     i = 0;
    2354      177020 :     foreach(l, node->plans)
    2355             :     {
    2356       89122 :         subplan = (Plan *) lfirst(l);
    2357             : 
    2358             :         /* Initialize the usesFdwDirectModify flag */
    2359      178244 :         resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i,
    2360       89122 :                                                               node->fdwDirectModifyPlans);
    2361             : 
    2362             :         /*
    2363             :          * Verify result relation is a valid target for the current operation
    2364             :          */
    2365       89122 :         CheckValidResultRel(resultRelInfo, operation);
    2366             : 
    2367             :         /*
    2368             :          * If there are indices on the result relation, open them and save
    2369             :          * descriptors in the result relation info, so that we can add new
    2370             :          * index entries for the tuples we add/update.  We need not do this
    2371             :          * for a DELETE, however, since deletion doesn't affect indexes. Also,
    2372             :          * inside an EvalPlanQual operation, the indexes might be open
    2373             :          * already, since we share the resultrel state with the original
    2374             :          * query.
    2375             :          */
    2376       89108 :         if (resultRelInfo->ri_RelationDesc->rd_rel->relhasindex &&
    2377       36428 :             operation != CMD_DELETE &&
    2378       36428 :             resultRelInfo->ri_IndexRelationDescs == NULL)
    2379       36414 :             ExecOpenIndices(resultRelInfo,
    2380       36414 :                             node->onConflictAction != ONCONFLICT_NONE);
    2381             : 
    2382             :         /*
    2383             :          * If this is an UPDATE and a BEFORE UPDATE trigger is present, the
    2384             :          * trigger itself might modify the partition-key values. So arrange
    2385             :          * for tuple routing.
    2386             :          */
    2387       89108 :         if (resultRelInfo->ri_TrigDesc &&
    2388        7346 :             resultRelInfo->ri_TrigDesc->trig_update_before_row &&
    2389             :             operation == CMD_UPDATE)
    2390         884 :             update_tuple_routing_needed = true;
    2391             : 
    2392             :         /* Now init the plan for this result rel */
    2393       89108 :         estate->es_result_relation_info = resultRelInfo;
    2394       89108 :         mtstate->mt_plans[i] = ExecInitNode(subplan, estate, eflags);
    2395      178216 :         mtstate->mt_scans[i] =
    2396       89108 :             ExecInitExtraTupleSlot(mtstate->ps.state, ExecGetResultType(mtstate->mt_plans[i]),
    2397             :                                    table_slot_callbacks(resultRelInfo->ri_RelationDesc));
    2398             : 
    2399             :         /* Also let FDWs init themselves for foreign-table result rels */
    2400       89108 :         if (!resultRelInfo->ri_usesFdwDirectModify &&
    2401       88942 :             resultRelInfo->ri_FdwRoutine != NULL &&
    2402         202 :             resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
    2403             :         {
    2404         202 :             List       *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
    2405             : 
    2406         202 :             resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
    2407             :                                                              resultRelInfo,
    2408             :                                                              fdw_private,
    2409             :                                                              i,
    2410             :                                                              eflags);
    2411             :         }
    2412             : 
    2413       89108 :         resultRelInfo++;
    2414       89108 :         i++;
    2415             :     }
    2416             : 
    2417       87898 :     estate->es_result_relation_info = saved_resultRelInfo;
    2418             : 
    2419             :     /* Get the target relation */
    2420       87898 :     rel = (getTargetResultRelInfo(mtstate))->ri_RelationDesc;
    2421             : 
    2422             :     /*
    2423             :      * If it's not a partitioned table after all, UPDATE tuple routing should
    2424             :      * not be attempted.
    2425             :      */
    2426       87898 :     if (rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
    2427       85192 :         update_tuple_routing_needed = false;
    2428             : 
    2429             :     /*
    2430             :      * Build state for tuple routing if it's an INSERT or if it's an UPDATE of
    2431             :      * partition key.
    2432             :      */
    2433       87898 :     if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
    2434         964 :         (operation == CMD_INSERT || update_tuple_routing_needed))
    2435        2208 :         mtstate->mt_partition_tuple_routing =
    2436        2208 :             ExecSetupPartitionTupleRouting(estate, mtstate, rel);
    2437             : 
    2438             :     /*
    2439             :      * Build state for collecting transition tuples.  This requires having a
    2440             :      * valid trigger query context, so skip it in explain-only mode.
    2441             :      */
    2442       87898 :     if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
    2443       87450 :         ExecSetupTransitionCaptureState(mtstate, estate);
    2444             : 
    2445             :     /*
    2446             :      * Construct mapping from each of the per-subplan partition attnos to the
    2447             :      * root attno.  This is required when during update row movement the tuple
    2448             :      * descriptor of a source partition does not match the root partitioned
    2449             :      * table descriptor.  In such a case we need to convert tuples to the root
    2450             :      * tuple descriptor, because the search for destination partition starts
    2451             :      * from the root.  We'll also need a slot to store these converted tuples.
    2452             :      * We can skip this setup if it's not a partition key update.
    2453             :      */
    2454       87898 :     if (update_tuple_routing_needed)
    2455             :     {
    2456         466 :         ExecSetupChildParentMapForSubplan(mtstate);
    2457         466 :         mtstate->mt_root_tuple_slot = table_slot_create(rel, NULL);
    2458             :     }
    2459             : 
    2460             :     /*
    2461             :      * Initialize any WITH CHECK OPTION constraints if needed.
    2462             :      */
    2463       87898 :     resultRelInfo = mtstate->resultRelInfo;
    2464       87898 :     i = 0;
    2465       88678 :     foreach(l, node->withCheckOptionLists)
    2466             :     {
    2467         780 :         List       *wcoList = (List *) lfirst(l);
    2468         780 :         List       *wcoExprs = NIL;
    2469             :         ListCell   *ll;
    2470             : 
    2471        1926 :         foreach(ll, wcoList)
    2472             :         {
    2473        1146 :             WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
    2474        1146 :             ExprState  *wcoExpr = ExecInitQual((List *) wco->qual,
    2475             :                                                &mtstate->ps);
    2476             : 
    2477        1146 :             wcoExprs = lappend(wcoExprs, wcoExpr);
    2478             :         }
    2479             : 
    2480         780 :         resultRelInfo->ri_WithCheckOptions = wcoList;
    2481         780 :         resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
    2482         780 :         resultRelInfo++;
    2483         780 :         i++;
    2484             :     }
    2485             : 
    2486             :     /*
    2487             :      * Initialize RETURNING projections if needed.
    2488             :      */
    2489       87898 :     if (node->returningLists)
    2490             :     {
    2491             :         TupleTableSlot *slot;
    2492             :         ExprContext *econtext;
    2493             : 
    2494             :         /*
    2495             :          * Initialize result tuple slot and assign its rowtype using the first
    2496             :          * RETURNING list.  We assume the rest will look the same.
    2497             :          */
    2498        1936 :         mtstate->ps.plan->targetlist = (List *) linitial(node->returningLists);
    2499             : 
    2500             :         /* Set up a slot for the output of the RETURNING projection(s) */
    2501        1936 :         ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
    2502        1936 :         slot = mtstate->ps.ps_ResultTupleSlot;
    2503             : 
    2504             :         /* Need an econtext too */
    2505        1936 :         if (mtstate->ps.ps_ExprContext == NULL)
    2506        1936 :             ExecAssignExprContext(estate, &mtstate->ps);
    2507        1936 :         econtext = mtstate->ps.ps_ExprContext;
    2508             : 
    2509             :         /*
    2510             :          * Build a projection for each result rel.
    2511             :          */
    2512        1936 :         resultRelInfo = mtstate->resultRelInfo;
    2513        4058 :         foreach(l, node->returningLists)
    2514             :         {
    2515        2122 :             List       *rlist = (List *) lfirst(l);
    2516             : 
    2517        2122 :             resultRelInfo->ri_returningList = rlist;
    2518        2122 :             resultRelInfo->ri_projectReturning =
    2519        2122 :                 ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
    2520        2122 :                                         resultRelInfo->ri_RelationDesc->rd_att);
    2521        2122 :             resultRelInfo++;
    2522             :         }
    2523             :     }
    2524             :     else
    2525             :     {
    2526             :         /*
    2527             :          * We still must construct a dummy result tuple type, because InitPlan
    2528             :          * expects one (maybe should change that?).
    2529             :          */
    2530       85962 :         mtstate->ps.plan->targetlist = NIL;
    2531       85962 :         ExecInitResultTypeTL(&mtstate->ps);
    2532             : 
    2533       85962 :         mtstate->ps.ps_ExprContext = NULL;
    2534             :     }
    2535             : 
    2536             :     /* Set the list of arbiter indexes if needed for ON CONFLICT */
    2537       87898 :     resultRelInfo = mtstate->resultRelInfo;
    2538       87898 :     if (node->onConflictAction != ONCONFLICT_NONE)
    2539         820 :         resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
    2540             : 
    2541             :     /*
    2542             :      * If needed, Initialize target list, projection and qual for ON CONFLICT
    2543             :      * DO UPDATE.
    2544             :      */
    2545       87898 :     if (node->onConflictAction == ONCONFLICT_UPDATE)
    2546             :     {
    2547             :         ExprContext *econtext;
    2548             :         TupleDesc   relationDesc;
    2549             :         TupleDesc   tupDesc;
    2550             : 
    2551             :         /* insert may only have one plan, inheritance is not expanded */
    2552             :         Assert(nplans == 1);
    2553             : 
    2554             :         /* already exists if created by RETURNING processing above */
    2555         600 :         if (mtstate->ps.ps_ExprContext == NULL)
    2556         428 :             ExecAssignExprContext(estate, &mtstate->ps);
    2557             : 
    2558         600 :         econtext = mtstate->ps.ps_ExprContext;
    2559         600 :         relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
    2560             : 
    2561             :         /* create state for DO UPDATE SET operation */
    2562         600 :         resultRelInfo->ri_onConflict = makeNode(OnConflictSetState);
    2563             : 
    2564             :         /* initialize slot for the existing tuple */
    2565        1200 :         resultRelInfo->ri_onConflict->oc_Existing =
    2566        1200 :             table_slot_create(resultRelInfo->ri_RelationDesc,
    2567         600 :                               &mtstate->ps.state->es_tupleTable);
    2568             : 
    2569             :         /*
    2570             :          * Create the tuple slot for the UPDATE SET projection. We want a slot
    2571             :          * of the table's type here, because the slot will be used to insert
    2572             :          * into the table, and for RETURNING processing - which may access
    2573             :          * system attributes.
    2574             :          */
    2575         600 :         tupDesc = ExecTypeFromTL((List *) node->onConflictSet);
    2576        1200 :         resultRelInfo->ri_onConflict->oc_ProjSlot =
    2577         600 :             ExecInitExtraTupleSlot(mtstate->ps.state, tupDesc,
    2578             :                                    table_slot_callbacks(resultRelInfo->ri_RelationDesc));
    2579             : 
    2580             :         /* build UPDATE SET projection state */
    2581        1200 :         resultRelInfo->ri_onConflict->oc_ProjInfo =
    2582        1800 :             ExecBuildProjectionInfo(node->onConflictSet, econtext,
    2583         600 :                                     resultRelInfo->ri_onConflict->oc_ProjSlot,
    2584             :                                     &mtstate->ps,
    2585             :                                     relationDesc);
    2586             : 
    2587             :         /* initialize state to evaluate the WHERE clause, if any */
    2588         600 :         if (node->onConflictWhere)
    2589             :         {
    2590             :             ExprState  *qualexpr;
    2591             : 
    2592         122 :             qualexpr = ExecInitQual((List *) node->onConflictWhere,
    2593             :                                     &mtstate->ps);
    2594         122 :             resultRelInfo->ri_onConflict->oc_WhereClause = qualexpr;
    2595             :         }
    2596             :     }
    2597             : 
    2598             :     /*
    2599             :      * If we have any secondary relations in an UPDATE or DELETE, they need to
    2600             :      * be treated like non-locked relations in SELECT FOR UPDATE, ie, the
    2601             :      * EvalPlanQual mechanism needs to be told about them.  Locate the
    2602             :      * relevant ExecRowMarks.
    2603             :      */
    2604       88884 :     foreach(l, node->rowMarks)
    2605             :     {
    2606         986 :         PlanRowMark *rc = lfirst_node(PlanRowMark, l);
    2607             :         ExecRowMark *erm;
    2608             : 
    2609             :         /* ignore "parent" rowmarks; they are irrelevant at runtime */
    2610         986 :         if (rc->isParent)
    2611          64 :             continue;
    2612             : 
    2613             :         /* find ExecRowMark (same for all subplans) */
    2614         922 :         erm = ExecFindRowMark(estate, rc->rti, false);
    2615             : 
    2616             :         /* build ExecAuxRowMark for each subplan */
    2617        2172 :         for (i = 0; i < nplans; i++)
    2618             :         {
    2619             :             ExecAuxRowMark *aerm;
    2620             : 
    2621        1250 :             subplan = mtstate->mt_plans[i]->plan;
    2622        1250 :             aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
    2623        1250 :             mtstate->mt_arowmarks[i] = lappend(mtstate->mt_arowmarks[i], aerm);
    2624             :         }
    2625             :     }
    2626             : 
    2627             :     /* select first subplan */
    2628       87898 :     mtstate->mt_whichplan = 0;
    2629       87898 :     subplan = (Plan *) linitial(node->plans);
    2630       87898 :     EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan,
    2631       87898 :                         mtstate->mt_arowmarks[0]);
    2632             : 
    2633             :     /*
    2634             :      * Initialize the junk filter(s) if needed.  INSERT queries need a filter
    2635             :      * if there are any junk attrs in the tlist.  UPDATE and DELETE always
    2636             :      * need a filter, since there's always at least one junk attribute present
    2637             :      * --- no need to look first.  Typically, this will be a 'ctid' or
    2638             :      * 'wholerow' attribute, but in the case of a foreign data wrapper it
    2639             :      * might be a set of junk attributes sufficient to identify the remote
    2640             :      * row.
    2641             :      *
    2642             :      * If there are multiple result relations, each one needs its own junk
    2643             :      * filter.  Note multiple rels are only possible for UPDATE/DELETE, so we
    2644             :      * can't be fooled by some needing a filter and some not.
    2645             :      *
    2646             :      * This section of code is also a convenient place to verify that the
    2647             :      * output of an INSERT or UPDATE matches the target table(s).
    2648             :      */
    2649             :     {
    2650       87898 :         bool        junk_filter_needed = false;
    2651             : 
    2652       87898 :         switch (operation)
    2653             :         {
    2654       69236 :             case CMD_INSERT:
    2655      311860 :                 foreach(l, subplan->targetlist)
    2656             :                 {
    2657      242624 :                     TargetEntry *tle = (TargetEntry *) lfirst(l);
    2658             : 
    2659      242624 :                     if (tle->resjunk)
    2660             :                     {
    2661           0 :                         junk_filter_needed = true;
    2662           0 :                         break;
    2663             :                     }
    2664             :                 }
    2665       69236 :                 break;
    2666       18662 :             case CMD_UPDATE:
    2667             :             case CMD_DELETE:
    2668       18662 :                 junk_filter_needed = true;
    2669       18662 :                 break;
    2670           0 :             default:
    2671           0 :                 elog(ERROR, "unknown operation");
    2672             :                 break;
    2673             :         }
    2674             : 
    2675       87898 :         if (junk_filter_needed)
    2676             :         {
    2677       18662 :             resultRelInfo = mtstate->resultRelInfo;
    2678       38530 :             for (i = 0; i < nplans; i++)
    2679             :             {
    2680             :                 JunkFilter *j;
    2681             :                 TupleTableSlot *junkresslot;
    2682             : 
    2683       19868 :                 subplan = mtstate->mt_plans[i]->plan;
    2684       19868 :                 if (operation == CMD_INSERT || operation == CMD_UPDATE)
    2685       11220 :                     ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc,
    2686             :                                         subplan->targetlist);
    2687             : 
    2688             :                 junkresslot =
    2689       19868 :                     ExecInitExtraTupleSlot(estate, NULL,
    2690             :                                            table_slot_callbacks(resultRelInfo->ri_RelationDesc));
    2691       19868 :                 j = ExecInitJunkFilter(subplan->targetlist,
    2692             :                                        junkresslot);
    2693             : 
    2694       19868 :                 if (operation == CMD_UPDATE || operation == CMD_DELETE)
    2695             :                 {
    2696             :                     /* For UPDATE/DELETE, find the appropriate junk attr now */
    2697             :                     char        relkind;
    2698             : 
    2699       19868 :                     relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
    2700       19868 :                     if (relkind == RELKIND_RELATION ||
    2701         416 :                         relkind == RELKIND_MATVIEW ||
    2702             :                         relkind == RELKIND_PARTITIONED_TABLE)
    2703             :                     {
    2704       19464 :                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "ctid");
    2705       19464 :                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
    2706           0 :                             elog(ERROR, "could not find junk ctid column");
    2707             :                     }
    2708         404 :                     else if (relkind == RELKIND_FOREIGN_TABLE)
    2709             :                     {
    2710             :                         /*
    2711             :                          * When there is a row-level trigger, there should be
    2712             :                          * a wholerow attribute.
    2713             :                          */
    2714         272 :                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow");
    2715             :                     }
    2716             :                     else
    2717             :                     {
    2718         132 :                         j->jf_junkAttNo = ExecFindJunkAttribute(j, "wholerow");
    2719         132 :                         if (!AttributeNumberIsValid(j->jf_junkAttNo))
    2720           0 :                             elog(ERROR, "could not find junk wholerow column");
    2721             :                     }
    2722             :                 }
    2723             : 
    2724       19868 :                 resultRelInfo->ri_junkFilter = j;
    2725       19868 :                 resultRelInfo++;
    2726             :             }
    2727             :         }
    2728             :         else
    2729             :         {
    2730       69236 :             if (operation == CMD_INSERT)
    2731       69236 :                 ExecCheckPlanOutput(mtstate->resultRelInfo->ri_RelationDesc,
    2732             :                                     subplan->targetlist);
    2733             :         }
    2734             :     }
    2735             : 
    2736             :     /*
    2737             :      * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
    2738             :      * to estate->es_auxmodifytables so that it will be run to completion by
    2739             :      * ExecPostprocessPlan.  (It'd actually work fine to add the primary
    2740             :      * ModifyTable node too, but there's no need.)  Note the use of lcons not
    2741             :      * lappend: we need later-initialized ModifyTable nodes to be shut down
    2742             :      * before earlier ones.  This ensures that we don't throw away RETURNING
    2743             :      * rows that need to be seen by a later CTE subplan.
    2744             :      */
    2745       87898 :     if (!mtstate->canSetTag)
    2746         578 :         estate->es_auxmodifytables = lcons(mtstate,
    2747             :                                            estate->es_auxmodifytables);
    2748             : 
    2749       87898 :     return mtstate;
    2750             : }
    2751             : 
    2752             : /* ----------------------------------------------------------------
    2753             :  *      ExecEndModifyTable
    2754             :  *
    2755             :  *      Shuts down the plan.
    2756             :  *
    2757             :  *      Returns nothing of interest.
    2758             :  * ----------------------------------------------------------------
    2759             :  */
    2760             : void
    2761       85548 : ExecEndModifyTable(ModifyTableState *node)
    2762             : {
    2763             :     int         i;
    2764             : 
    2765             :     /*
    2766             :      * Allow any FDWs to shut down
    2767             :      */
    2768      172174 :     for (i = 0; i < node->mt_nplans; i++)
    2769             :     {
    2770       86626 :         ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
    2771             : 
    2772       86626 :         if (!resultRelInfo->ri_usesFdwDirectModify &&
    2773       86470 :             resultRelInfo->ri_FdwRoutine != NULL &&
    2774         186 :             resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
    2775         186 :             resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
    2776             :                                                            resultRelInfo);
    2777             :     }
    2778             : 
    2779             :     /*
    2780             :      * Close all the partitioned tables, leaf partitions, and their indices
    2781             :      * and release the slot used for tuple routing, if set.
    2782             :      */
    2783       85548 :     if (node->mt_partition_tuple_routing)
    2784             :     {
    2785        1796 :         ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
    2786             : 
    2787        1796 :         if (node->mt_root_tuple_slot)
    2788         366 :             ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
    2789             :     }
    2790             : 
    2791             :     /*
    2792             :      * Free the exprcontext
    2793             :      */
    2794       85548 :     ExecFreeExprContext(&node->ps);
    2795             : 
    2796             :     /*
    2797             :      * clean out the tuple table
    2798             :      */
    2799       85548 :     if (node->ps.ps_ResultTupleSlot)
    2800        1852 :         ExecClearTuple(node->ps.ps_ResultTupleSlot);
    2801             : 
    2802             :     /*
    2803             :      * Terminate EPQ execution if active
    2804             :      */
    2805       85548 :     EvalPlanQualEnd(&node->mt_epqstate);
    2806             : 
    2807             :     /*
    2808             :      * shut down subplans
    2809             :      */
    2810      172174 :     for (i = 0; i < node->mt_nplans; i++)
    2811       86626 :         ExecEndNode(node->mt_plans[i]);
    2812       85548 : }
    2813             : 
    2814             : void
    2815           0 : ExecReScanModifyTable(ModifyTableState *node)
    2816             : {
    2817             :     /*
    2818             :      * Currently, we don't need to support rescan on ModifyTable nodes. The
    2819             :      * semantics of that would be a bit debatable anyway.
    2820             :      */
    2821           0 :     elog(ERROR, "ExecReScanModifyTable is not implemented");
    2822             : }

Generated by: LCOV version 1.13