Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeModifyTable.c
4 : * routines to handle ModifyTable nodes.
5 : *
6 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/executor/nodeModifyTable.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : /* INTERFACE ROUTINES
16 : * ExecInitModifyTable - initialize the ModifyTable node
17 : * ExecModifyTable - retrieve the next tuple from the node
18 : * ExecEndModifyTable - shut down the ModifyTable node
19 : * ExecReScanModifyTable - rescan the ModifyTable node
20 : *
21 : * NOTES
22 : * The ModifyTable node receives input from its outerPlan, which is
23 : * the data to insert for INSERT cases, the changed columns' new
24 : * values plus row-locating info for UPDATE and MERGE cases, or just the
25 : * row-locating info for DELETE cases.
26 : *
27 : * The relation to modify can be an ordinary table, a foreign table, or a
28 : * view. If it's a view, either it has sufficient INSTEAD OF triggers or
29 : * this node executes only MERGE ... DO NOTHING. If the original MERGE
30 : * targeted a view not in one of those two categories, earlier processing
31 : * already pointed the ModifyTable result relation to an underlying
32 : * relation of that other view. This node does process
33 : * ri_WithCheckOptions, which may have expressions from those other,
34 : * automatically updatable views.
35 : *
36 : * MERGE runs a join between the source relation and the target table.
37 : * If any WHEN NOT MATCHED [BY TARGET] clauses are present, then the join
38 : * is an outer join that might output tuples without a matching target
39 : * tuple. In this case, any unmatched target tuples will have NULL
40 : * row-locating info, and only INSERT can be run. But for matched target
41 : * tuples, the row-locating info is used to determine the tuple to UPDATE
42 : * or DELETE. When all clauses are WHEN MATCHED or WHEN NOT MATCHED BY
43 : * SOURCE, all tuples produced by the join will include a matching target
44 : * tuple, so all tuples contain row-locating info.
45 : *
46 : * If the query specifies RETURNING, then the ModifyTable returns a
47 : * RETURNING tuple after completing each row insert, update, or delete.
48 : * It must be called again to continue the operation. Without RETURNING,
49 : * we just loop within the node until all the work is done, then
50 : * return NULL. This avoids useless call/return overhead.
51 : */
52 :
53 : #include "postgres.h"
54 :
55 : #include "access/htup_details.h"
56 : #include "access/tableam.h"
57 : #include "access/xact.h"
58 : #include "commands/trigger.h"
59 : #include "executor/execPartition.h"
60 : #include "executor/executor.h"
61 : #include "executor/nodeModifyTable.h"
62 : #include "foreign/fdwapi.h"
63 : #include "miscadmin.h"
64 : #include "nodes/nodeFuncs.h"
65 : #include "optimizer/optimizer.h"
66 : #include "rewrite/rewriteHandler.h"
67 : #include "rewrite/rewriteManip.h"
68 : #include "storage/lmgr.h"
69 : #include "utils/builtins.h"
70 : #include "utils/datum.h"
71 : #include "utils/injection_point.h"
72 : #include "utils/rel.h"
73 : #include "utils/snapmgr.h"
74 :
75 :
76 : typedef struct MTTargetRelLookup
77 : {
78 : Oid relationOid; /* hash key, must be first */
79 : int relationIndex; /* rel's index in resultRelInfo[] array */
80 : } MTTargetRelLookup;
81 :
82 : /*
83 : * Context struct for a ModifyTable operation, containing basic execution
84 : * state and some output variables populated by ExecUpdateAct() and
85 : * ExecDeleteAct() to report the result of their actions to callers.
86 : */
87 : typedef struct ModifyTableContext
88 : {
89 : /* Operation state */
90 : ModifyTableState *mtstate;
91 : EPQState *epqstate;
92 : EState *estate;
93 :
94 : /*
95 : * Slot containing tuple obtained from ModifyTable's subplan. Used to
96 : * access "junk" columns that are not going to be stored.
97 : */
98 : TupleTableSlot *planSlot;
99 :
100 : /*
101 : * Information about the changes that were made concurrently to a tuple
102 : * being updated or deleted
103 : */
104 : TM_FailureData tmfd;
105 :
106 : /*
107 : * The tuple deleted when doing a cross-partition UPDATE with a RETURNING
108 : * clause that refers to OLD columns (converted to the root's tuple
109 : * descriptor).
110 : */
111 : TupleTableSlot *cpDeletedSlot;
112 :
113 : /*
114 : * The tuple projected by the INSERT's RETURNING clause, when doing a
115 : * cross-partition UPDATE
116 : */
117 : TupleTableSlot *cpUpdateReturningSlot;
118 : } ModifyTableContext;
119 :
120 : /*
121 : * Context struct containing output data specific to UPDATE operations.
122 : */
123 : typedef struct UpdateContext
124 : {
125 : bool crossPartUpdate; /* was it a cross-partition update? */
126 : TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
127 :
128 : /*
129 : * Lock mode to acquire on the latest tuple version before performing
130 : * EvalPlanQual on it
131 : */
132 : LockTupleMode lockmode;
133 : } UpdateContext;
134 :
135 :
136 : static void ExecBatchInsert(ModifyTableState *mtstate,
137 : ResultRelInfo *resultRelInfo,
138 : TupleTableSlot **slots,
139 : TupleTableSlot **planSlots,
140 : int numSlots,
141 : EState *estate,
142 : bool canSetTag);
143 : static void ExecPendingInserts(EState *estate);
144 : static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
145 : ResultRelInfo *sourcePartInfo,
146 : ResultRelInfo *destPartInfo,
147 : ItemPointer tupleid,
148 : TupleTableSlot *oldslot,
149 : TupleTableSlot *newslot);
150 : static bool ExecOnConflictLockRow(ModifyTableContext *context,
151 : TupleTableSlot *existing,
152 : ItemPointer conflictTid,
153 : Relation relation,
154 : LockTupleMode lockmode,
155 : bool isUpdate);
156 : static bool ExecOnConflictUpdate(ModifyTableContext *context,
157 : ResultRelInfo *resultRelInfo,
158 : ItemPointer conflictTid,
159 : TupleTableSlot *excludedSlot,
160 : bool canSetTag,
161 : TupleTableSlot **returning);
162 : static bool ExecOnConflictSelect(ModifyTableContext *context,
163 : ResultRelInfo *resultRelInfo,
164 : ItemPointer conflictTid,
165 : TupleTableSlot *excludedSlot,
166 : bool canSetTag,
167 : TupleTableSlot **returning);
168 : static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
169 : EState *estate,
170 : PartitionTupleRouting *proute,
171 : ResultRelInfo *targetRelInfo,
172 : TupleTableSlot *slot,
173 : ResultRelInfo **partRelInfo);
174 :
175 : static TupleTableSlot *ExecMerge(ModifyTableContext *context,
176 : ResultRelInfo *resultRelInfo,
177 : ItemPointer tupleid,
178 : HeapTuple oldtuple,
179 : bool canSetTag);
180 : static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
181 : static TupleTableSlot *ExecMergeMatched(ModifyTableContext *context,
182 : ResultRelInfo *resultRelInfo,
183 : ItemPointer tupleid,
184 : HeapTuple oldtuple,
185 : bool canSetTag,
186 : bool *matched);
187 : static TupleTableSlot *ExecMergeNotMatched(ModifyTableContext *context,
188 : ResultRelInfo *resultRelInfo,
189 : bool canSetTag);
190 :
191 :
192 : /*
193 : * Verify that the tuples to be produced by INSERT match the
194 : * target relation's rowtype
195 : *
196 : * We do this to guard against stale plans. If plan invalidation is
197 : * functioning properly then we should never get a failure here, but better
198 : * safe than sorry. Note that this is called after we have obtained lock
199 : * on the target rel, so the rowtype can't change underneath us.
200 : *
201 : * The plan output is represented by its targetlist, because that makes
202 : * handling the dropped-column case easier.
203 : *
204 : * We used to use this for UPDATE as well, but now the equivalent checks
205 : * are done in ExecBuildUpdateProjection.
206 : */
207 : static void
208 45424 : ExecCheckPlanOutput(Relation resultRel, List *targetList)
209 : {
210 45424 : TupleDesc resultDesc = RelationGetDescr(resultRel);
211 45424 : int attno = 0;
212 : ListCell *lc;
213 :
214 140932 : foreach(lc, targetList)
215 : {
216 95508 : TargetEntry *tle = (TargetEntry *) lfirst(lc);
217 : Form_pg_attribute attr;
218 :
219 : Assert(!tle->resjunk); /* caller removed junk items already */
220 :
221 95508 : if (attno >= resultDesc->natts)
222 0 : ereport(ERROR,
223 : (errcode(ERRCODE_DATATYPE_MISMATCH),
224 : errmsg("table row type and query-specified row type do not match"),
225 : errdetail("Query has too many columns.")));
226 95508 : attr = TupleDescAttr(resultDesc, attno);
227 95508 : attno++;
228 :
229 : /*
230 : * Special cases here should match planner's expand_insert_targetlist.
231 : */
232 95508 : if (attr->attisdropped)
233 : {
234 : /*
235 : * For a dropped column, we can't check atttypid (it's likely 0).
236 : * In any case the planner has most likely inserted an INT4 null.
237 : * What we insist on is just *some* NULL constant.
238 : */
239 341 : if (!IsA(tle->expr, Const) ||
240 341 : !((Const *) tle->expr)->constisnull)
241 0 : ereport(ERROR,
242 : (errcode(ERRCODE_DATATYPE_MISMATCH),
243 : errmsg("table row type and query-specified row type do not match"),
244 : errdetail("Query provides a value for a dropped column at ordinal position %d.",
245 : attno)));
246 : }
247 95167 : else if (attr->attgenerated)
248 : {
249 : /*
250 : * For a generated column, the planner will have inserted a null
251 : * of the column's base type (to avoid possibly failing on domain
252 : * not-null constraints). It doesn't seem worth insisting on that
253 : * exact type though, since a null value is type-independent. As
254 : * above, just insist on *some* NULL constant.
255 : */
256 613 : if (!IsA(tle->expr, Const) ||
257 613 : !((Const *) tle->expr)->constisnull)
258 0 : ereport(ERROR,
259 : (errcode(ERRCODE_DATATYPE_MISMATCH),
260 : errmsg("table row type and query-specified row type do not match"),
261 : errdetail("Query provides a value for a generated column at ordinal position %d.",
262 : attno)));
263 : }
264 : else
265 : {
266 : /* Normal case: demand type match */
267 94554 : if (exprType((Node *) tle->expr) != attr->atttypid)
268 0 : ereport(ERROR,
269 : (errcode(ERRCODE_DATATYPE_MISMATCH),
270 : errmsg("table row type and query-specified row type do not match"),
271 : errdetail("Table has type %s at ordinal position %d, but query expects %s.",
272 : format_type_be(attr->atttypid),
273 : attno,
274 : format_type_be(exprType((Node *) tle->expr)))));
275 : }
276 : }
277 45424 : if (attno != resultDesc->natts)
278 0 : ereport(ERROR,
279 : (errcode(ERRCODE_DATATYPE_MISMATCH),
280 : errmsg("table row type and query-specified row type do not match"),
281 : errdetail("Query has too few columns.")));
282 45424 : }
283 :
284 : /*
285 : * ExecProcessReturning --- evaluate a RETURNING list
286 : *
287 : * context: context for the ModifyTable operation
288 : * resultRelInfo: current result rel
289 : * isDelete: true if the operation/merge action is a DELETE
290 : * oldSlot: slot holding old tuple deleted or updated
291 : * newSlot: slot holding new tuple inserted or updated
292 : * planSlot: slot holding tuple returned by top subplan node
293 : *
294 : * Note: If oldSlot and newSlot are NULL, the FDW should have already provided
295 : * econtext's scan tuple and its old & new tuples are not needed (FDW direct-
296 : * modify is disabled if the RETURNING list refers to any OLD/NEW values).
297 : *
298 : * Note: For the SELECT path of INSERT ... ON CONFLICT DO SELECT, oldSlot and
299 : * newSlot are both the existing tuple, since it's not changed.
300 : *
301 : * Returns a slot holding the result tuple
302 : */
303 : static TupleTableSlot *
304 4918 : ExecProcessReturning(ModifyTableContext *context,
305 : ResultRelInfo *resultRelInfo,
306 : bool isDelete,
307 : TupleTableSlot *oldSlot,
308 : TupleTableSlot *newSlot,
309 : TupleTableSlot *planSlot)
310 : {
311 4918 : EState *estate = context->estate;
312 4918 : ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
313 4918 : ExprContext *econtext = projectReturning->pi_exprContext;
314 :
315 : /* Make tuple and any needed join variables available to ExecProject */
316 4918 : if (isDelete)
317 : {
318 : /* return old tuple by default */
319 721 : if (oldSlot)
320 602 : econtext->ecxt_scantuple = oldSlot;
321 : }
322 : else
323 : {
324 : /* return new tuple by default */
325 4197 : if (newSlot)
326 3969 : econtext->ecxt_scantuple = newSlot;
327 : }
328 4918 : econtext->ecxt_outertuple = planSlot;
329 :
330 : /* Make old/new tuples available to ExecProject, if required */
331 4918 : if (oldSlot)
332 2052 : econtext->ecxt_oldtuple = oldSlot;
333 2866 : else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
334 95 : econtext->ecxt_oldtuple = ExecGetAllNullSlot(estate, resultRelInfo);
335 : else
336 2771 : econtext->ecxt_oldtuple = NULL; /* No references to OLD columns */
337 :
338 4918 : if (newSlot)
339 3969 : econtext->ecxt_newtuple = newSlot;
340 949 : else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW)
341 66 : econtext->ecxt_newtuple = ExecGetAllNullSlot(estate, resultRelInfo);
342 : else
343 883 : econtext->ecxt_newtuple = NULL; /* No references to NEW columns */
344 :
345 : /*
346 : * Tell ExecProject whether or not the OLD/NEW rows actually exist. This
347 : * information is required to evaluate ReturningExpr nodes and also in
348 : * ExecEvalSysVar() and ExecEvalWholeRowVar().
349 : */
350 4918 : if (oldSlot == NULL)
351 2866 : projectReturning->pi_state.flags |= EEO_FLAG_OLD_IS_NULL;
352 : else
353 2052 : projectReturning->pi_state.flags &= ~EEO_FLAG_OLD_IS_NULL;
354 :
355 4918 : if (newSlot == NULL)
356 949 : projectReturning->pi_state.flags |= EEO_FLAG_NEW_IS_NULL;
357 : else
358 3969 : projectReturning->pi_state.flags &= ~EEO_FLAG_NEW_IS_NULL;
359 :
360 : /* Compute the RETURNING expressions */
361 4918 : return ExecProject(projectReturning);
362 : }
363 :
364 : /*
365 : * ExecCheckTupleVisible -- verify tuple is visible
366 : *
367 : * It would not be consistent with guarantees of the higher isolation levels to
368 : * proceed with avoiding insertion (taking speculative insertion's alternative
369 : * path) on the basis of another tuple that is not visible to MVCC snapshot.
370 : * Check for the need to raise a serialization failure, and do so as necessary.
371 : */
372 : static void
373 2779 : ExecCheckTupleVisible(EState *estate,
374 : Relation rel,
375 : TupleTableSlot *slot)
376 : {
377 2779 : if (!IsolationUsesXactSnapshot())
378 2741 : return;
379 :
380 38 : if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
381 : {
382 : Datum xminDatum;
383 : TransactionId xmin;
384 : bool isnull;
385 :
386 26 : xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
387 : Assert(!isnull);
388 26 : xmin = DatumGetTransactionId(xminDatum);
389 :
390 : /*
391 : * We should not raise a serialization failure if the conflict is
392 : * against a tuple inserted by our own transaction, even if it's not
393 : * visible to our snapshot. (This would happen, for example, if
394 : * conflicting keys are proposed for insertion in a single command.)
395 : */
396 26 : if (!TransactionIdIsCurrentTransactionId(xmin))
397 10 : ereport(ERROR,
398 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
399 : errmsg("could not serialize access due to concurrent update")));
400 : }
401 : }
402 :
403 : /*
404 : * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
405 : */
406 : static void
407 112 : ExecCheckTIDVisible(EState *estate,
408 : ResultRelInfo *relinfo,
409 : ItemPointer tid,
410 : TupleTableSlot *tempSlot)
411 : {
412 112 : Relation rel = relinfo->ri_RelationDesc;
413 :
414 : /* Redundantly check isolation level */
415 112 : if (!IsolationUsesXactSnapshot())
416 80 : return;
417 :
418 32 : if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
419 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
420 32 : ExecCheckTupleVisible(estate, rel, tempSlot);
421 22 : ExecClearTuple(tempSlot);
422 : }
423 :
424 : /*
425 : * Initialize generated columns handling for a tuple
426 : *
427 : * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI or
428 : * ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
429 : * This is used only for stored generated columns.
430 : *
431 : * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
432 : * This is used by both stored and virtual generated columns.
433 : *
434 : * Note: usually, a given query would need only one of ri_GeneratedExprsI and
435 : * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
436 : * cross-partition UPDATEs, since a partition might be the target of both
437 : * UPDATE and INSERT actions.
438 : */
439 : void
440 29965 : ExecInitGenerated(ResultRelInfo *resultRelInfo,
441 : EState *estate,
442 : CmdType cmdtype)
443 : {
444 29965 : Relation rel = resultRelInfo->ri_RelationDesc;
445 29965 : TupleDesc tupdesc = RelationGetDescr(rel);
446 29965 : int natts = tupdesc->natts;
447 : ExprState **ri_GeneratedExprs;
448 : int ri_NumGeneratedNeeded;
449 : Bitmapset *updatedCols;
450 : MemoryContext oldContext;
451 :
452 : /* Nothing to do if no generated columns */
453 29965 : if (!(tupdesc->constr && (tupdesc->constr->has_generated_stored || tupdesc->constr->has_generated_virtual)))
454 29363 : return;
455 :
456 : /*
457 : * In an UPDATE, we can skip computing any generated columns that do not
458 : * depend on any UPDATE target column. But if there is a BEFORE ROW
459 : * UPDATE trigger, we cannot skip because the trigger might change more
460 : * columns.
461 : */
462 602 : if (cmdtype == CMD_UPDATE &&
463 135 : !(rel->trigdesc && rel->trigdesc->trig_update_before_row))
464 113 : updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
465 : else
466 489 : updatedCols = NULL;
467 :
468 : /*
469 : * Make sure these data structures are built in the per-query memory
470 : * context so they'll survive throughout the query.
471 : */
472 602 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
473 :
474 602 : ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
475 602 : ri_NumGeneratedNeeded = 0;
476 :
477 2453 : for (int i = 0; i < natts; i++)
478 : {
479 1854 : char attgenerated = TupleDescAttr(tupdesc, i)->attgenerated;
480 :
481 1854 : if (attgenerated)
482 : {
483 : Expr *expr;
484 :
485 : /* Fetch the GENERATED AS expression tree */
486 644 : expr = (Expr *) build_column_default(rel, i + 1);
487 644 : if (expr == NULL)
488 0 : elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
489 : i + 1, RelationGetRelationName(rel));
490 :
491 : /*
492 : * If it's an update with a known set of update target columns,
493 : * see if we can skip the computation.
494 : */
495 644 : if (updatedCols)
496 : {
497 120 : Bitmapset *attrs_used = NULL;
498 :
499 120 : pull_varattnos((Node *) expr, 1, &attrs_used);
500 :
501 120 : if (!bms_overlap(updatedCols, attrs_used))
502 12 : continue; /* need not update this column */
503 : }
504 :
505 : /* No luck, so prepare the expression for execution */
506 632 : if (attgenerated == ATTRIBUTE_GENERATED_STORED)
507 : {
508 590 : ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
509 587 : ri_NumGeneratedNeeded++;
510 : }
511 :
512 : /* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
513 629 : if (cmdtype == CMD_UPDATE)
514 134 : resultRelInfo->ri_extraUpdatedCols =
515 134 : bms_add_member(resultRelInfo->ri_extraUpdatedCols,
516 : i + 1 - FirstLowInvalidHeapAttributeNumber);
517 : }
518 : }
519 :
520 599 : if (ri_NumGeneratedNeeded == 0)
521 : {
522 : /* didn't need it after all */
523 21 : pfree(ri_GeneratedExprs);
524 21 : ri_GeneratedExprs = NULL;
525 : }
526 :
527 : /* Save in appropriate set of fields */
528 599 : if (cmdtype == CMD_UPDATE)
529 : {
530 : /* Don't call twice */
531 : Assert(resultRelInfo->ri_GeneratedExprsU == NULL);
532 :
533 135 : resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
534 135 : resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;
535 :
536 135 : resultRelInfo->ri_extraUpdatedCols_valid = true;
537 : }
538 : else
539 : {
540 : /* Don't call twice */
541 : Assert(resultRelInfo->ri_GeneratedExprsI == NULL);
542 :
543 464 : resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
544 464 : resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
545 : }
546 :
547 599 : MemoryContextSwitchTo(oldContext);
548 : }
549 :
550 : /*
551 : * Compute stored generated columns for a tuple
552 : */
553 : void
554 824 : ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
555 : EState *estate, TupleTableSlot *slot,
556 : CmdType cmdtype)
557 : {
558 824 : Relation rel = resultRelInfo->ri_RelationDesc;
559 824 : TupleDesc tupdesc = RelationGetDescr(rel);
560 824 : int natts = tupdesc->natts;
561 824 : ExprContext *econtext = GetPerTupleExprContext(estate);
562 : ExprState **ri_GeneratedExprs;
563 : MemoryContext oldContext;
564 : Datum *values;
565 : bool *nulls;
566 :
567 : /* We should not be called unless this is true */
568 : Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);
569 :
570 : /*
571 : * Initialize the expressions if we didn't already, and check whether we
572 : * can exit early because nothing needs to be computed.
573 : */
574 824 : if (cmdtype == CMD_UPDATE)
575 : {
576 146 : if (resultRelInfo->ri_GeneratedExprsU == NULL)
577 110 : ExecInitGenerated(resultRelInfo, estate, cmdtype);
578 146 : if (resultRelInfo->ri_NumGeneratedNeededU == 0)
579 9 : return;
580 137 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
581 : }
582 : else
583 : {
584 678 : if (resultRelInfo->ri_GeneratedExprsI == NULL)
585 467 : ExecInitGenerated(resultRelInfo, estate, cmdtype);
586 : /* Early exit is impossible given the prior Assert */
587 : Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
588 675 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
589 : }
590 :
591 812 : oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
592 :
593 812 : values = palloc_array(Datum, natts);
594 812 : nulls = palloc_array(bool, natts);
595 :
596 812 : slot_getallattrs(slot);
597 812 : memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);
598 :
599 3307 : for (int i = 0; i < natts; i++)
600 : {
601 2507 : CompactAttribute *attr = TupleDescCompactAttr(tupdesc, i);
602 :
603 2507 : if (ri_GeneratedExprs[i])
604 : {
605 : Datum val;
606 : bool isnull;
607 :
608 : Assert(TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED);
609 :
610 823 : econtext->ecxt_scantuple = slot;
611 :
612 823 : val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);
613 :
614 : /*
615 : * We must make a copy of val as we have no guarantees about where
616 : * memory for a pass-by-reference Datum is located.
617 : */
618 811 : if (!isnull)
619 787 : val = datumCopy(val, attr->attbyval, attr->attlen);
620 :
621 811 : values[i] = val;
622 811 : nulls[i] = isnull;
623 : }
624 : else
625 : {
626 1684 : if (!nulls[i])
627 1610 : values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
628 : }
629 : }
630 :
631 800 : ExecClearTuple(slot);
632 800 : memcpy(slot->tts_values, values, sizeof(*values) * natts);
633 800 : memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
634 800 : ExecStoreVirtualTuple(slot);
635 800 : ExecMaterializeSlot(slot);
636 :
637 800 : MemoryContextSwitchTo(oldContext);
638 : }
639 :
640 : /*
641 : * ExecInitInsertProjection
642 : * Do one-time initialization of projection data for INSERT tuples.
643 : *
644 : * INSERT queries may need a projection to filter out junk attrs in the tlist.
645 : *
646 : * This is also a convenient place to verify that the
647 : * output of an INSERT matches the target table.
648 : */
649 : static void
650 44880 : ExecInitInsertProjection(ModifyTableState *mtstate,
651 : ResultRelInfo *resultRelInfo)
652 : {
653 44880 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
654 44880 : Plan *subplan = outerPlan(node);
655 44880 : EState *estate = mtstate->ps.state;
656 44880 : List *insertTargetList = NIL;
657 44880 : bool need_projection = false;
658 : ListCell *l;
659 :
660 : /* Extract non-junk columns of the subplan's result tlist. */
661 138979 : foreach(l, subplan->targetlist)
662 : {
663 94099 : TargetEntry *tle = (TargetEntry *) lfirst(l);
664 :
665 94099 : if (!tle->resjunk)
666 94099 : insertTargetList = lappend(insertTargetList, tle);
667 : else
668 0 : need_projection = true;
669 : }
670 :
671 : /*
672 : * The junk-free list must produce a tuple suitable for the result
673 : * relation.
674 : */
675 44880 : ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);
676 :
677 : /* We'll need a slot matching the table's format. */
678 44880 : resultRelInfo->ri_newTupleSlot =
679 44880 : table_slot_create(resultRelInfo->ri_RelationDesc,
680 : &estate->es_tupleTable);
681 :
682 : /* Build ProjectionInfo if needed (it probably isn't). */
683 44880 : if (need_projection)
684 : {
685 0 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
686 :
687 : /* need an expression context to do the projection */
688 0 : if (mtstate->ps.ps_ExprContext == NULL)
689 0 : ExecAssignExprContext(estate, &mtstate->ps);
690 :
691 0 : resultRelInfo->ri_projectNew =
692 0 : ExecBuildProjectionInfo(insertTargetList,
693 : mtstate->ps.ps_ExprContext,
694 : resultRelInfo->ri_newTupleSlot,
695 : &mtstate->ps,
696 : relDesc);
697 : }
698 :
699 44880 : resultRelInfo->ri_projectNewInfoValid = true;
700 44880 : }
701 :
702 : /*
703 : * ExecInitUpdateProjection
704 : * Do one-time initialization of projection data for UPDATE tuples.
705 : *
706 : * UPDATE always needs a projection, because (1) there's always some junk
707 : * attrs, and (2) we may need to merge values of not-updated columns from
708 : * the old tuple into the final tuple. In UPDATE, the tuple arriving from
709 : * the subplan contains only new values for the changed columns, plus row
710 : * identity info in the junk attrs.
711 : *
712 : * This is "one-time" for any given result rel, but we might touch more than
713 : * one result rel in the course of an inherited UPDATE, and each one needs
714 : * its own projection due to possible column order variation.
715 : *
716 : * This is also a convenient place to verify that the output of an UPDATE
717 : * matches the target table (ExecBuildUpdateProjection does that).
718 : */
719 : static void
720 7057 : ExecInitUpdateProjection(ModifyTableState *mtstate,
721 : ResultRelInfo *resultRelInfo)
722 : {
723 7057 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
724 7057 : Plan *subplan = outerPlan(node);
725 7057 : EState *estate = mtstate->ps.state;
726 7057 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
727 : int whichrel;
728 : List *updateColnos;
729 :
730 : /*
731 : * Usually, mt_lastResultIndex matches the target rel. If it happens not
732 : * to, we can get the index the hard way with an integer division.
733 : */
734 7057 : whichrel = mtstate->mt_lastResultIndex;
735 7057 : if (resultRelInfo != mtstate->resultRelInfo + whichrel)
736 : {
737 0 : whichrel = resultRelInfo - mtstate->resultRelInfo;
738 : Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
739 : }
740 :
741 7057 : updateColnos = (List *) list_nth(mtstate->mt_updateColnosLists, whichrel);
742 :
743 : /*
744 : * For UPDATE, we use the old tuple to fill up missing values in the tuple
745 : * produced by the subplan to get the new tuple. We need two slots, both
746 : * matching the table's desired format.
747 : */
748 7057 : resultRelInfo->ri_oldTupleSlot =
749 7057 : table_slot_create(resultRelInfo->ri_RelationDesc,
750 : &estate->es_tupleTable);
751 7057 : resultRelInfo->ri_newTupleSlot =
752 7057 : table_slot_create(resultRelInfo->ri_RelationDesc,
753 : &estate->es_tupleTable);
754 :
755 : /* need an expression context to do the projection */
756 7057 : if (mtstate->ps.ps_ExprContext == NULL)
757 6347 : ExecAssignExprContext(estate, &mtstate->ps);
758 :
759 7057 : resultRelInfo->ri_projectNew =
760 7057 : ExecBuildUpdateProjection(subplan->targetlist,
761 : false, /* subplan did the evaluation */
762 : updateColnos,
763 : relDesc,
764 : mtstate->ps.ps_ExprContext,
765 : resultRelInfo->ri_newTupleSlot,
766 : &mtstate->ps);
767 :
768 7057 : resultRelInfo->ri_projectNewInfoValid = true;
769 7057 : }
770 :
771 : /*
772 : * ExecGetInsertNewTuple
773 : * This prepares a "new" tuple ready to be inserted into given result
774 : * relation, by removing any junk columns of the plan's output tuple
775 : * and (if necessary) coercing the tuple to the right tuple format.
776 : */
777 : static TupleTableSlot *
778 6139020 : ExecGetInsertNewTuple(ResultRelInfo *relinfo,
779 : TupleTableSlot *planSlot)
780 : {
781 6139020 : ProjectionInfo *newProj = relinfo->ri_projectNew;
782 : ExprContext *econtext;
783 :
784 : /*
785 : * If there's no projection to be done, just make sure the slot is of the
786 : * right type for the target rel. If the planSlot is the right type we
787 : * can use it as-is, else copy the data into ri_newTupleSlot.
788 : */
789 6139020 : if (newProj == NULL)
790 : {
791 6139020 : if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
792 : {
793 5735229 : ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
794 5735229 : return relinfo->ri_newTupleSlot;
795 : }
796 : else
797 403791 : return planSlot;
798 : }
799 :
800 : /*
801 : * Else project; since the projection output slot is ri_newTupleSlot, this
802 : * will also fix any slot-type problem.
803 : *
804 : * Note: currently, this is dead code, because INSERT cases don't receive
805 : * any junk columns so there's never a projection to be done.
806 : */
807 0 : econtext = newProj->pi_exprContext;
808 0 : econtext->ecxt_outertuple = planSlot;
809 0 : return ExecProject(newProj);
810 : }
811 :
812 : /*
813 : * ExecGetUpdateNewTuple
814 : * This prepares a "new" tuple by combining an UPDATE subplan's output
815 : * tuple (which contains values of changed columns) with unchanged
816 : * columns taken from the old tuple.
817 : *
818 : * The subplan tuple might also contain junk columns, which are ignored.
819 : * Note that the projection also ensures we have a slot of the right type.
820 : */
821 : TupleTableSlot *
822 160071 : ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
823 : TupleTableSlot *planSlot,
824 : TupleTableSlot *oldSlot)
825 : {
826 160071 : ProjectionInfo *newProj = relinfo->ri_projectNew;
827 : ExprContext *econtext;
828 :
829 : /* Use a few extra Asserts to protect against outside callers */
830 : Assert(relinfo->ri_projectNewInfoValid);
831 : Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
832 : Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));
833 :
834 160071 : econtext = newProj->pi_exprContext;
835 160071 : econtext->ecxt_outertuple = planSlot;
836 160071 : econtext->ecxt_scantuple = oldSlot;
837 160071 : return ExecProject(newProj);
838 : }
839 :
/* ----------------------------------------------------------------
 *		ExecInsert
 *
 *		For INSERT, we have to insert the tuple into the target relation
 *		(or partition thereof) and insert appropriate tuples into the index
 *		relations.
 *
 *		slot contains the new tuple value to be stored.
 *
 *		Returns RETURNING result if any, otherwise NULL.
 *		*inserted_tuple is the tuple that's effectively inserted;
 *		*insert_destrel is the relation where it was inserted.
 *		These are only set on success.
 *
 *		This may change the currently active tuple conversion map in
 *		mtstate->mt_transition_capture, so the callers must take care to
 *		save the previous value to avoid losing track of it.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecInsert(ModifyTableContext *context,
		   ResultRelInfo *resultRelInfo,
		   TupleTableSlot *slot,
		   bool canSetTag,
		   TupleTableSlot **inserted_tuple,
		   ResultRelInfo **insert_destrel)
{
	ModifyTableState *mtstate = context->mtstate;
	EState	   *estate = context->estate;
	Relation	resultRelationDesc;
	List	   *recheckIndexes = NIL;
	TupleTableSlot *planSlot = context->planSlot;
	TupleTableSlot *result = NULL;
	TransitionCaptureState *ar_insert_trig_tcs;
	ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
	OnConflictAction onconflict = node->onConflictAction;
	PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
	MemoryContext oldContext;

	/*
	 * If the input result relation is a partitioned table, find the leaf
	 * partition to insert the tuple into.  From here on, resultRelInfo
	 * refers to the leaf partition, not the partitioned root.
	 */
	if (proute)
	{
		ResultRelInfo *partRelInfo;

		slot = ExecPrepareTupleRouting(mtstate, estate, proute,
									   resultRelInfo, slot,
									   &partRelInfo);
		resultRelInfo = partRelInfo;
	}

	/* Materialize so the slot owns its tuple independently of its source */
	ExecMaterializeSlot(slot);

	resultRelationDesc = resultRelInfo->ri_RelationDesc;

	/*
	 * Open the table's indexes, if we have not done so already, so that we
	 * can add new index entries for the inserted tuple.
	 */
	if (resultRelationDesc->rd_rel->relhasindex &&
		resultRelInfo->ri_IndexRelationDescs == NULL)
		ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);

	/*
	 * BEFORE ROW INSERT Triggers.
	 *
	 * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
	 * INSERT ... ON CONFLICT statement.  We cannot check for constraint
	 * violations before firing these triggers, because they can change the
	 * values to insert.  Also, they can run arbitrary user-defined code with
	 * side-effects that we can't cancel by just not inserting the tuple.
	 */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_insert_before_row)
	{
		/* Flush any pending inserts, so rows are visible to the triggers */
		if (estate->es_insert_pending_result_relations != NIL)
			ExecPendingInserts(estate);

		if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
			return NULL;		/* "do nothing" */
	}

	/* INSTEAD OF ROW INSERT Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
	{
		if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
			return NULL;		/* "do nothing" */
	}
	else if (resultRelInfo->ri_FdwRoutine)
	{
		/*
		 * GENERATED expressions might reference the tableoid column, so
		 * (re-)initialize tts_tableOid before evaluating them.
		 */
		slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);

		/*
		 * Compute stored generated columns
		 */
		if (resultRelationDesc->rd_att->constr &&
			resultRelationDesc->rd_att->constr->has_generated_stored)
			ExecComputeStoredGenerated(resultRelInfo, estate, slot,
									   CMD_INSERT);

		/*
		 * If the FDW supports batching, and batching is requested, accumulate
		 * rows and insert them in batches.  Otherwise use the per-row inserts.
		 */
		if (resultRelInfo->ri_BatchSize > 1)
		{
			bool		flushed = false;

			/*
			 * When we've reached the desired batch size, perform the
			 * insertion.
			 */
			if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
			{
				ExecBatchInsert(mtstate, resultRelInfo,
								resultRelInfo->ri_Slots,
								resultRelInfo->ri_PlanSlots,
								resultRelInfo->ri_NumSlots,
								estate, canSetTag);
				flushed = true;
			}

			/* Batch bookkeeping must survive per-tuple context resets */
			oldContext = MemoryContextSwitchTo(estate->es_query_cxt);

			if (resultRelInfo->ri_Slots == NULL)
			{
				resultRelInfo->ri_Slots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
				resultRelInfo->ri_PlanSlots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
			}

			/*
			 * Initialize the batch slots.  We don't know how many slots will
			 * be needed, so we initialize them as the batch grows, and we
			 * keep them across batches.  To mitigate an inefficiency in how
			 * resource owner handles objects with many references (as with
			 * many slots all referencing the same tuple descriptor) we copy
			 * the appropriate tuple descriptor for each slot.
			 */
			if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
			{
				TupleDesc	tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
				TupleDesc	plan_tdesc =
					CreateTupleDescCopy(planSlot->tts_tupleDescriptor);

				resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
					MakeSingleTupleTableSlot(tdesc, slot->tts_ops);

				resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
					MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);

				/* remember how many batch slots we initialized */
				resultRelInfo->ri_NumSlotsInitialized++;
			}

			ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
						 slot);

			ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
						 planSlot);

			/*
			 * If these are the first tuples stored in the buffers, add the
			 * target rel and the mtstate to the
			 * es_insert_pending_result_relations and
			 * es_insert_pending_modifytables lists respectively, except in
			 * the case where flushing was done above, in which case they
			 * would already have been added to the lists, so no need to do
			 * this.
			 */
			if (resultRelInfo->ri_NumSlots == 0 && !flushed)
			{
				Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
										resultRelInfo));
				estate->es_insert_pending_result_relations =
					lappend(estate->es_insert_pending_result_relations,
							resultRelInfo);
				estate->es_insert_pending_modifytables =
					lappend(estate->es_insert_pending_modifytables, mtstate);
			}
			Assert(list_member_ptr(estate->es_insert_pending_result_relations,
								   resultRelInfo));

			resultRelInfo->ri_NumSlots++;

			MemoryContextSwitchTo(oldContext);

			/* Tuple is only buffered; no RETURNING/AR triggers run yet */
			return NULL;
		}

		/*
		 * insert into foreign table: let the FDW do it
		 */
		slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
															   resultRelInfo,
															   slot,
															   planSlot);

		if (slot == NULL)		/* "do nothing" */
			return NULL;

		/*
		 * AFTER ROW Triggers or RETURNING expressions might reference the
		 * tableoid column, so (re-)initialize tts_tableOid before evaluating
		 * them.  (This covers the case where the FDW replaced the slot.)
		 */
		slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
	}
	else
	{
		WCOKind		wco_kind;

		/*
		 * Constraints and GENERATED expressions might reference the tableoid
		 * column, so (re-)initialize tts_tableOid before evaluating them.
		 */
		slot->tts_tableOid = RelationGetRelid(resultRelationDesc);

		/*
		 * Compute stored generated columns
		 */
		if (resultRelationDesc->rd_att->constr &&
			resultRelationDesc->rd_att->constr->has_generated_stored)
			ExecComputeStoredGenerated(resultRelInfo, estate, slot,
									   CMD_INSERT);

		/*
		 * Check any RLS WITH CHECK policies.
		 *
		 * Normally we should check INSERT policies.  But if the insert is the
		 * result of a partition key update that moved the tuple to a new
		 * partition, we should instead check UPDATE policies, because we are
		 * executing policies defined on the target table, and not those
		 * defined on the child partitions.
		 *
		 * If we're running MERGE, we refer to the action that we're executing
		 * to know if we're doing an INSERT or UPDATE to a partition table.
		 */
		if (mtstate->operation == CMD_UPDATE)
			wco_kind = WCO_RLS_UPDATE_CHECK;
		else if (mtstate->operation == CMD_MERGE)
			wco_kind = (mtstate->mt_merge_action->mas_action->commandType == CMD_UPDATE) ?
				WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
		else
			wco_kind = WCO_RLS_INSERT_CHECK;

		/*
		 * ExecWithCheckOptions() will skip any WCOs which are not of the kind
		 * we are looking for at this point.
		 */
		if (resultRelInfo->ri_WithCheckOptions != NIL)
			ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);

		/*
		 * Check the constraints of the tuple.
		 */
		if (resultRelationDesc->rd_att->constr)
			ExecConstraints(resultRelInfo, slot, estate);

		/*
		 * Also check the tuple against the partition constraint, if there is
		 * one; except that if we got here via tuple-routing, we don't need to
		 * if there's no BR trigger defined on the partition.
		 */
		if (resultRelationDesc->rd_rel->relispartition &&
			(resultRelInfo->ri_RootResultRelInfo == NULL ||
			 (resultRelInfo->ri_TrigDesc &&
			  resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
			ExecPartitionCheck(resultRelInfo, slot, estate, true);

		if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
		{
			/* Perform a speculative insertion. */
			uint32		specToken;
			ItemPointerData conflictTid;
			ItemPointerData invalidItemPtr;
			bool		specConflict;
			List	   *arbiterIndexes;

			ItemPointerSetInvalid(&invalidItemPtr);
			arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;

			/*
			 * Do a non-conclusive check for conflicts first.
			 *
			 * We're not holding any locks yet, so this doesn't guarantee that
			 * the later insert won't conflict.  But it avoids leaving behind
			 * a lot of canceled speculative insertions, if you run a lot of
			 * INSERT ON CONFLICT statements that do conflict.
			 *
			 * We loop back here if we find a conflict below, either during
			 * the pre-check, or when we re-check after inserting the tuple
			 * speculatively.  Better allow interrupts in case some bug makes
			 * this an infinite loop.
			 */
	vlock:
			CHECK_FOR_INTERRUPTS();
			specConflict = false;
			if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
										   &conflictTid, &invalidItemPtr,
										   arbiterIndexes))
			{
				/* committed conflict tuple found */
				if (onconflict == ONCONFLICT_UPDATE)
				{
					/*
					 * In case of ON CONFLICT DO UPDATE, execute the UPDATE
					 * part.  Be prepared to retry if the UPDATE fails because
					 * of another concurrent UPDATE/DELETE to the conflict
					 * tuple.
					 */
					TupleTableSlot *returning = NULL;

					if (ExecOnConflictUpdate(context, resultRelInfo,
											 &conflictTid, slot, canSetTag,
											 &returning))
					{
						InstrCountTuples2(&mtstate->ps, 1);
						return returning;
					}
					else
						goto vlock;
				}
				else if (onconflict == ONCONFLICT_SELECT)
				{
					/*
					 * In case of ON CONFLICT DO SELECT, optionally lock the
					 * conflicting tuple, fetch it and project RETURNING on
					 * it.  Be prepared to retry if locking fails because of a
					 * concurrent UPDATE/DELETE to the conflict tuple.
					 */
					TupleTableSlot *returning = NULL;

					if (ExecOnConflictSelect(context, resultRelInfo,
											 &conflictTid, slot, canSetTag,
											 &returning))
					{
						InstrCountTuples2(&mtstate->ps, 1);
						return returning;
					}
					else
						goto vlock;
				}
				else
				{
					/*
					 * In case of ON CONFLICT DO NOTHING, do nothing.  However,
					 * verify that the tuple is visible to the executor's MVCC
					 * snapshot at higher isolation levels.
					 *
					 * Using ExecGetReturningSlot() to store the tuple for the
					 * recheck isn't that pretty, but we can't trivially use
					 * the input slot, because it might not be of a compatible
					 * type.  As there's no conflicting usage of
					 * ExecGetReturningSlot() in the DO NOTHING case...
					 */
					Assert(onconflict == ONCONFLICT_NOTHING);
					ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
										ExecGetReturningSlot(estate, resultRelInfo));
					InstrCountTuples2(&mtstate->ps, 1);
					return NULL;
				}
			}

			/*
			 * Before we start insertion proper, acquire our "speculative
			 * insertion lock".  Others can use that to wait for us to decide
			 * if we're going to go ahead with the insertion, instead of
			 * waiting for the whole transaction to complete.
			 */
			INJECTION_POINT("exec-insert-before-insert-speculative", NULL);
			specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());

			/* insert the tuple, with the speculative token */
			table_tuple_insert_speculative(resultRelationDesc, slot,
										   estate->es_output_cid,
										   0,
										   NULL,
										   specToken);

			/* insert index entries for tuple */
			recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
												   estate, EIIT_NO_DUPE_ERROR,
												   slot, arbiterIndexes,
												   &specConflict);

			/* adjust the tuple's state accordingly */
			table_tuple_complete_speculative(resultRelationDesc, slot,
											 specToken, !specConflict);

			/*
			 * Wake up anyone waiting for our decision.  They will re-check
			 * the tuple, see that it's no longer speculative, and wait on our
			 * XID as if this was a regularly inserted tuple all along.  Or if
			 * we killed the tuple, they will see it's dead, and proceed as if
			 * the tuple never existed.
			 */
			SpeculativeInsertionLockRelease(GetCurrentTransactionId());

			/*
			 * If there was a conflict, start from the beginning.  We'll do
			 * the pre-check again, which will now find the conflicting tuple
			 * (unless it aborts before we get there).
			 */
			if (specConflict)
			{
				list_free(recheckIndexes);
				goto vlock;
			}

			/* Since there was no insertion conflict, we're done */
		}
		else
		{
			/* insert the tuple normally */
			table_tuple_insert(resultRelationDesc, slot,
							   estate->es_output_cid,
							   0, NULL);

			/* insert index entries for tuple */
			if (resultRelInfo->ri_NumIndices > 0)
				recheckIndexes = ExecInsertIndexTuples(resultRelInfo, estate,
													   0, slot, NIL,
													   NULL);
		}
	}

	if (canSetTag)
		(estate->es_processed)++;

	/*
	 * If this insert is the result of a partition key update that moved the
	 * tuple to a new partition, put this row into the transition NEW TABLE,
	 * if there is one.  We need to do this separately for DELETE and INSERT
	 * because they happen on different tables.
	 */
	ar_insert_trig_tcs = mtstate->mt_transition_capture;
	if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
		&& mtstate->mt_transition_capture->tcs_update_new_table)
	{
		ExecARUpdateTriggers(estate, resultRelInfo,
							 NULL, NULL,
							 NULL,
							 NULL,
							 slot,
							 NULL,
							 mtstate->mt_transition_capture,
							 false);

		/*
		 * We've already captured the NEW TABLE row, so make sure any AR
		 * INSERT trigger fired below doesn't capture it again.
		 */
		ar_insert_trig_tcs = NULL;
	}

	/* AFTER ROW INSERT Triggers */
	ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
						 ar_insert_trig_tcs);

	list_free(recheckIndexes);

	/*
	 * Check any WITH CHECK OPTION constraints from parent views.  We are
	 * required to do this after testing all constraints and uniqueness
	 * violations per the SQL spec, so we do it after actually inserting the
	 * record into the heap and all indexes.
	 *
	 * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
	 * tuple will never be seen, if it violates the WITH CHECK OPTION.
	 *
	 * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
	 * are looking for at this point.
	 */
	if (resultRelInfo->ri_WithCheckOptions != NIL)
		ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);

	/* Process RETURNING if present */
	if (resultRelInfo->ri_projectReturning)
	{
		TupleTableSlot *oldSlot = NULL;

		/*
		 * If this is part of a cross-partition UPDATE, and the RETURNING list
		 * refers to any OLD columns, ExecDelete() will have saved the tuple
		 * deleted from the original partition, which we must use here to
		 * compute the OLD column values.  Otherwise, all OLD column values
		 * will be NULL.
		 */
		if (context->cpDeletedSlot)
		{
			TupleConversionMap *tupconv_map;

			/*
			 * Convert the OLD tuple to the new partition's format/slot, if
			 * needed.  Note that ExecDelete() already converted it to the
			 * root's partition's format/slot.
			 */
			oldSlot = context->cpDeletedSlot;
			tupconv_map = ExecGetRootToChildMap(resultRelInfo, estate);
			if (tupconv_map != NULL)
			{
				oldSlot = execute_attr_map_slot(tupconv_map->attrMap,
												oldSlot,
												ExecGetReturningSlot(estate,
																	 resultRelInfo));

				oldSlot->tts_tableOid = context->cpDeletedSlot->tts_tableOid;
				ItemPointerCopy(&context->cpDeletedSlot->tts_tid, &oldSlot->tts_tid);
			}
		}

		result = ExecProcessReturning(context, resultRelInfo, false,
									  oldSlot, slot, planSlot);

		/*
		 * For a cross-partition UPDATE, release the old tuple, first making
		 * sure that the result slot has a local copy of any pass-by-reference
		 * values.
		 */
		if (context->cpDeletedSlot)
		{
			ExecMaterializeSlot(result);
			ExecClearTuple(oldSlot);
			if (context->cpDeletedSlot != oldSlot)
				ExecClearTuple(context->cpDeletedSlot);
			context->cpDeletedSlot = NULL;
		}
	}

	/* Report insertion details to interested callers (e.g. MERGE) */
	if (inserted_tuple)
		*inserted_tuple = slot;
	if (insert_destrel)
		*insert_destrel = resultRelInfo;

	return result;
}
1384 :
1385 : /* ----------------------------------------------------------------
1386 : * ExecBatchInsert
1387 : *
1388 : * Insert multiple tuples in an efficient way.
1389 : * Currently, this handles inserting into a foreign table without
1390 : * RETURNING clause.
1391 : * ----------------------------------------------------------------
1392 : */
1393 : static void
1394 29 : ExecBatchInsert(ModifyTableState *mtstate,
1395 : ResultRelInfo *resultRelInfo,
1396 : TupleTableSlot **slots,
1397 : TupleTableSlot **planSlots,
1398 : int numSlots,
1399 : EState *estate,
1400 : bool canSetTag)
1401 : {
1402 : int i;
1403 29 : int numInserted = numSlots;
1404 29 : TupleTableSlot *slot = NULL;
1405 : TupleTableSlot **rslots;
1406 :
1407 : /*
1408 : * insert into foreign table: let the FDW do it
1409 : */
1410 29 : rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1411 : resultRelInfo,
1412 : slots,
1413 : planSlots,
1414 : &numInserted);
1415 :
1416 173 : for (i = 0; i < numInserted; i++)
1417 : {
1418 145 : slot = rslots[i];
1419 :
1420 : /*
1421 : * AFTER ROW Triggers might reference the tableoid column, so
1422 : * (re-)initialize tts_tableOid before evaluating them.
1423 : */
1424 145 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1425 :
1426 : /* AFTER ROW INSERT Triggers */
1427 145 : ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1428 145 : mtstate->mt_transition_capture);
1429 :
1430 : /*
1431 : * Check any WITH CHECK OPTION constraints from parent views. See the
1432 : * comment in ExecInsert.
1433 : */
1434 144 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1435 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1436 : }
1437 :
1438 28 : if (canSetTag && numInserted > 0)
1439 28 : estate->es_processed += numInserted;
1440 :
1441 : /* Clean up all the slots, ready for the next batch */
1442 172 : for (i = 0; i < numSlots; i++)
1443 : {
1444 144 : ExecClearTuple(slots[i]);
1445 144 : ExecClearTuple(planSlots[i]);
1446 : }
1447 28 : resultRelInfo->ri_NumSlots = 0;
1448 28 : }
1449 :
1450 : /*
1451 : * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1452 : */
1453 : static void
1454 18 : ExecPendingInserts(EState *estate)
1455 : {
1456 : ListCell *l1,
1457 : *l2;
1458 :
1459 36 : forboth(l1, estate->es_insert_pending_result_relations,
1460 : l2, estate->es_insert_pending_modifytables)
1461 : {
1462 19 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1463 19 : ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1464 :
1465 : Assert(mtstate);
1466 19 : ExecBatchInsert(mtstate, resultRelInfo,
1467 : resultRelInfo->ri_Slots,
1468 : resultRelInfo->ri_PlanSlots,
1469 : resultRelInfo->ri_NumSlots,
1470 19 : estate, mtstate->canSetTag);
1471 : }
1472 :
1473 17 : list_free(estate->es_insert_pending_result_relations);
1474 17 : list_free(estate->es_insert_pending_modifytables);
1475 17 : estate->es_insert_pending_result_relations = NIL;
1476 17 : estate->es_insert_pending_modifytables = NIL;
1477 17 : }
1478 :
1479 : /*
1480 : * ExecDeletePrologue -- subroutine for ExecDelete
1481 : *
1482 : * Prepare executor state for DELETE. Actually, the only thing we have to do
1483 : * here is execute BEFORE ROW triggers. We return false if one of them makes
1484 : * the delete a no-op; otherwise, return true.
1485 : */
1486 : static bool
1487 825757 : ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1488 : ItemPointer tupleid, HeapTuple oldtuple,
1489 : TupleTableSlot **epqreturnslot, TM_Result *result)
1490 : {
1491 825757 : if (result)
1492 799 : *result = TM_Ok;
1493 :
1494 : /* BEFORE ROW DELETE triggers */
1495 825757 : if (resultRelInfo->ri_TrigDesc &&
1496 3531 : resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1497 : {
1498 : /* Flush any pending inserts, so rows are visible to the triggers */
1499 173 : if (context->estate->es_insert_pending_result_relations != NIL)
1500 1 : ExecPendingInserts(context->estate);
1501 :
1502 165 : return ExecBRDeleteTriggers(context->estate, context->epqstate,
1503 : resultRelInfo, tupleid, oldtuple,
1504 : epqreturnslot, result, &context->tmfd,
1505 173 : context->mtstate->operation == CMD_MERGE);
1506 : }
1507 :
1508 825584 : return true;
1509 : }
1510 :
1511 : /*
1512 : * ExecDeleteAct -- subroutine for ExecDelete
1513 : *
1514 : * Actually delete the tuple from a plain table.
1515 : *
1516 : * Caller is in charge of doing EvalPlanQual as necessary
1517 : */
1518 : static TM_Result
1519 825668 : ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1520 : ItemPointer tupleid, bool changingPart)
1521 : {
1522 825668 : EState *estate = context->estate;
1523 :
1524 825668 : return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1525 : estate->es_output_cid,
1526 : estate->es_snapshot,
1527 : estate->es_crosscheck_snapshot,
1528 : true /* wait for commit */ ,
1529 : &context->tmfd,
1530 : changingPart);
1531 : }
1532 :
1533 : /*
1534 : * ExecDeleteEpilogue -- subroutine for ExecDelete
1535 : *
1536 : * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1537 : * including the UPDATE triggers if the deletion is being done as part of a
1538 : * cross-partition tuple move.
1539 : */
1540 : static void
1541 825638 : ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1542 : ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1543 : {
1544 825638 : ModifyTableState *mtstate = context->mtstate;
1545 825638 : EState *estate = context->estate;
1546 : TransitionCaptureState *ar_delete_trig_tcs;
1547 :
1548 : /*
1549 : * If this delete is the result of a partition key update that moved the
1550 : * tuple to a new partition, put this row into the transition OLD TABLE,
1551 : * if there is one. We need to do this separately for DELETE and INSERT
1552 : * because they happen on different tables.
1553 : */
1554 825638 : ar_delete_trig_tcs = mtstate->mt_transition_capture;
1555 825638 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1556 27 : mtstate->mt_transition_capture->tcs_update_old_table)
1557 : {
1558 24 : ExecARUpdateTriggers(estate, resultRelInfo,
1559 : NULL, NULL,
1560 : tupleid, oldtuple,
1561 24 : NULL, NULL, mtstate->mt_transition_capture,
1562 : false);
1563 :
1564 : /*
1565 : * We've already captured the OLD TABLE row, so make sure any AR
1566 : * DELETE trigger fired below doesn't capture it again.
1567 : */
1568 24 : ar_delete_trig_tcs = NULL;
1569 : }
1570 :
1571 : /* AFTER ROW DELETE Triggers */
1572 825638 : ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1573 : ar_delete_trig_tcs, changingPart);
1574 825636 : }
1575 :
1576 : /* ----------------------------------------------------------------
1577 : * ExecDelete
1578 : *
1579 : * DELETE is like UPDATE, except that we delete the tuple and no
1580 : * index modifications are needed.
1581 : *
1582 : * When deleting from a table, tupleid identifies the tuple to delete and
1583 : * oldtuple is NULL. When deleting through a view INSTEAD OF trigger,
1584 : * oldtuple is passed to the triggers and identifies what to delete, and
1585 : * tupleid is invalid. When deleting from a foreign table, tupleid is
1586 : * invalid; the FDW has to figure out which row to delete using data from
1587 : * the planSlot. oldtuple is passed to foreign table triggers; it is
1588 : * NULL when the foreign table has no relevant triggers. We use
1589 : * tupleDeleted to indicate whether the tuple is actually deleted,
1590 : * callers can use it to decide whether to continue the operation. When
1591 : * this DELETE is a part of an UPDATE of partition-key, then the slot
1592 : * returned by EvalPlanQual() is passed back using output parameter
1593 : * epqreturnslot.
1594 : *
1595 : * Returns RETURNING result if any, otherwise NULL.
1596 : * ----------------------------------------------------------------
1597 : */
1598 : static TupleTableSlot *
1599 825494 : ExecDelete(ModifyTableContext *context,
1600 : ResultRelInfo *resultRelInfo,
1601 : ItemPointer tupleid,
1602 : HeapTuple oldtuple,
1603 : bool processReturning,
1604 : bool changingPart,
1605 : bool canSetTag,
1606 : TM_Result *tmresult,
1607 : bool *tupleDeleted,
1608 : TupleTableSlot **epqreturnslot)
1609 : {
1610 825494 : EState *estate = context->estate;
1611 825494 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1612 825494 : TupleTableSlot *slot = NULL;
1613 : TM_Result result;
1614 : bool saveOld;
1615 :
1616 825494 : if (tupleDeleted)
1617 536 : *tupleDeleted = false;
1618 :
1619 : /*
1620 : * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1621 : * done if it says we are.
1622 : */
1623 825494 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1624 : epqreturnslot, tmresult))
1625 26 : return NULL;
1626 :
1627 : /* INSTEAD OF ROW DELETE Triggers */
1628 825460 : if (resultRelInfo->ri_TrigDesc &&
1629 3463 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
1630 24 : {
1631 : bool dodelete;
1632 :
1633 : Assert(oldtuple != NULL);
1634 27 : dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1635 :
1636 27 : if (!dodelete) /* "do nothing" */
1637 3 : return NULL;
1638 : }
1639 825433 : else if (resultRelInfo->ri_FdwRoutine)
1640 : {
1641 : /*
1642 : * delete from foreign table: let the FDW do it
1643 : *
1644 : * We offer the returning slot as a place to store RETURNING data,
1645 : * although the FDW can return some other slot if it wants.
1646 : */
1647 23 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1648 23 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1649 : resultRelInfo,
1650 : slot,
1651 : context->planSlot);
1652 :
1653 23 : if (slot == NULL) /* "do nothing" */
1654 0 : return NULL;
1655 :
1656 : /*
1657 : * RETURNING expressions might reference the tableoid column, so
1658 : * (re)initialize tts_tableOid before evaluating them.
1659 : */
1660 23 : if (TTS_EMPTY(slot))
1661 5 : ExecStoreAllNullTuple(slot);
1662 :
1663 23 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1664 : }
1665 : else
1666 : {
1667 : /*
1668 : * delete the tuple
1669 : *
1670 : * Note: if context->estate->es_crosscheck_snapshot isn't
1671 : * InvalidSnapshot, we check that the row to be deleted is visible to
1672 : * that snapshot, and throw a can't-serialize error if not. This is a
1673 : * special-case behavior needed for referential integrity updates in
1674 : * transaction-snapshot mode transactions.
1675 : */
1676 825410 : ldelete:
1677 825414 : result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1678 :
1679 825396 : if (tmresult)
1680 519 : *tmresult = result;
1681 :
1682 825396 : switch (result)
1683 : {
1684 15 : case TM_SelfModified:
1685 :
1686 : /*
1687 : * The target tuple was already updated or deleted by the
1688 : * current command, or by a later command in the current
1689 : * transaction. The former case is possible in a join DELETE
1690 : * where multiple tuples join to the same target tuple. This
1691 : * is somewhat questionable, but Postgres has always allowed
1692 : * it: we just ignore additional deletion attempts.
1693 : *
1694 : * The latter case arises if the tuple is modified by a
1695 : * command in a BEFORE trigger, or perhaps by a command in a
1696 : * volatile function used in the query. In such situations we
1697 : * should not ignore the deletion, but it is equally unsafe to
1698 : * proceed. We don't want to discard the original DELETE
1699 : * while keeping the triggered actions based on its deletion;
1700 : * and it would be no better to allow the original DELETE
1701 : * while discarding updates that it triggered. The row update
1702 : * carries some information that might be important according
1703 : * to business rules; so throwing an error is the only safe
1704 : * course.
1705 : *
1706 : * If a trigger actually intends this type of interaction, it
1707 : * can re-execute the DELETE and then return NULL to cancel
1708 : * the outer delete.
1709 : */
1710 15 : if (context->tmfd.cmax != estate->es_output_cid)
1711 3 : ereport(ERROR,
1712 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1713 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1714 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1715 :
1716 : /* Else, already deleted by self; nothing to do */
1717 12 : return NULL;
1718 :
1719 825343 : case TM_Ok:
1720 825343 : break;
1721 :
1722 35 : case TM_Updated:
1723 : {
1724 : TupleTableSlot *inputslot;
1725 : TupleTableSlot *epqslot;
1726 :
1727 35 : if (IsolationUsesXactSnapshot())
1728 1 : ereport(ERROR,
1729 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1730 : errmsg("could not serialize access due to concurrent update")));
1731 :
1732 : /*
1733 : * Already know that we're going to need to do EPQ, so
1734 : * fetch tuple directly into the right slot.
1735 : */
1736 34 : EvalPlanQualBegin(context->epqstate);
1737 34 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1738 : resultRelInfo->ri_RangeTableIndex);
1739 :
1740 34 : result = table_tuple_lock(resultRelationDesc, tupleid,
1741 : estate->es_snapshot,
1742 : inputslot, estate->es_output_cid,
1743 : LockTupleExclusive, LockWaitBlock,
1744 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1745 : &context->tmfd);
1746 :
1747 30 : switch (result)
1748 : {
1749 27 : case TM_Ok:
1750 : Assert(context->tmfd.traversed);
1751 27 : epqslot = EvalPlanQual(context->epqstate,
1752 : resultRelationDesc,
1753 : resultRelInfo->ri_RangeTableIndex,
1754 : inputslot);
1755 27 : if (TupIsNull(epqslot))
1756 : /* Tuple not passing quals anymore, exiting... */
1757 15 : return NULL;
1758 :
1759 : /*
1760 : * If requested, skip delete and pass back the
1761 : * updated row.
1762 : */
1763 12 : if (epqreturnslot)
1764 : {
1765 8 : *epqreturnslot = epqslot;
1766 8 : return NULL;
1767 : }
1768 : else
1769 4 : goto ldelete;
1770 :
1771 2 : case TM_SelfModified:
1772 :
1773 : /*
1774 : * This can be reached when following an update
1775 : * chain from a tuple updated by another session,
1776 : * reaching a tuple that was already updated in
1777 : * this transaction. If previously updated by this
1778 : * command, ignore the delete, otherwise error
1779 : * out.
1780 : *
1781 : * See also TM_SelfModified response to
1782 : * table_tuple_delete() above.
1783 : */
1784 2 : if (context->tmfd.cmax != estate->es_output_cid)
1785 1 : ereport(ERROR,
1786 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1787 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1788 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1789 1 : return NULL;
1790 :
1791 1 : case TM_Deleted:
1792 : /* tuple already deleted; nothing to do */
1793 1 : return NULL;
1794 :
1795 0 : default:
1796 :
1797 : /*
1798 : * TM_Invisible should be impossible because we're
1799 : * waiting for updated row versions, and would
1800 : * already have errored out if the first version
1801 : * is invisible.
1802 : *
1803 : * TM_Updated should be impossible, because we're
1804 : * locking the latest version via
1805 : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
1806 : */
1807 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
1808 : result);
1809 : return NULL;
1810 : }
1811 :
1812 : Assert(false);
1813 : break;
1814 : }
1815 :
1816 3 : case TM_Deleted:
1817 3 : if (IsolationUsesXactSnapshot())
1818 0 : ereport(ERROR,
1819 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1820 : errmsg("could not serialize access due to concurrent delete")));
1821 : /* tuple already deleted; nothing to do */
1822 3 : return NULL;
1823 :
1824 0 : default:
1825 0 : elog(ERROR, "unrecognized table_tuple_delete status: %u",
1826 : result);
1827 : return NULL;
1828 : }
1829 :
1830 : /*
1831 : * Note: Normally one would think that we have to delete index tuples
1832 : * associated with the heap tuple now...
1833 : *
1834 : * ... but in POSTGRES, we have no need to do this because VACUUM will
1835 : * take care of it later. We can't delete index tuples immediately
1836 : * anyway, since the tuple is still visible to other transactions.
1837 : */
1838 : }
1839 :
1840 825390 : if (canSetTag)
1841 824783 : (estate->es_processed)++;
1842 :
1843 : /* Tell caller that the delete actually happened. */
1844 825390 : if (tupleDeleted)
1845 493 : *tupleDeleted = true;
1846 :
1847 825390 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
1848 :
1849 : /*
1850 : * Process RETURNING if present and if requested.
1851 : *
1852 : * If this is part of a cross-partition UPDATE, and the RETURNING list
1853 : * refers to any OLD column values, save the old tuple here for later
1854 : * processing of the RETURNING list by ExecInsert().
1855 : */
1856 825463 : saveOld = changingPart && resultRelInfo->ri_projectReturning &&
1857 75 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD;
1858 :
1859 825388 : if (resultRelInfo->ri_projectReturning && (processReturning || saveOld))
1860 : {
1861 : /*
1862 : * We have to put the target tuple into a slot, which means first we
1863 : * gotta fetch it. We can use the trigger tuple slot.
1864 : */
1865 : TupleTableSlot *rslot;
1866 :
1867 506 : if (resultRelInfo->ri_FdwRoutine)
1868 : {
1869 : /* FDW must have provided a slot containing the deleted row */
1870 : Assert(!TupIsNull(slot));
1871 : }
1872 : else
1873 : {
1874 499 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1875 499 : if (oldtuple != NULL)
1876 : {
1877 12 : ExecForceStoreHeapTuple(oldtuple, slot, false);
1878 : }
1879 : else
1880 : {
1881 487 : if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
1882 : SnapshotAny, slot))
1883 0 : elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1884 : }
1885 : }
1886 :
1887 : /*
1888 : * If required, save the old tuple for later processing of the
1889 : * RETURNING list by ExecInsert().
1890 : */
1891 506 : if (saveOld)
1892 : {
1893 : TupleConversionMap *tupconv_map;
1894 :
1895 : /*
1896 : * Convert the tuple into the root partition's format/slot, if
1897 : * needed. ExecInsert() will then convert it to the new
1898 : * partition's format/slot, if necessary.
1899 : */
1900 24 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1901 24 : if (tupconv_map != NULL)
1902 : {
1903 10 : ResultRelInfo *rootRelInfo = context->mtstate->rootResultRelInfo;
1904 10 : TupleTableSlot *oldSlot = slot;
1905 :
1906 10 : slot = execute_attr_map_slot(tupconv_map->attrMap,
1907 : slot,
1908 : ExecGetReturningSlot(estate,
1909 : rootRelInfo));
1910 :
1911 10 : slot->tts_tableOid = oldSlot->tts_tableOid;
1912 10 : ItemPointerCopy(&oldSlot->tts_tid, &slot->tts_tid);
1913 : }
1914 :
1915 24 : context->cpDeletedSlot = slot;
1916 :
1917 24 : return NULL;
1918 : }
1919 :
1920 482 : rslot = ExecProcessReturning(context, resultRelInfo, true,
1921 : slot, NULL, context->planSlot);
1922 :
1923 : /*
1924 : * Before releasing the target tuple again, make sure rslot has a
1925 : * local copy of any pass-by-reference values.
1926 : */
1927 482 : ExecMaterializeSlot(rslot);
1928 :
1929 482 : ExecClearTuple(slot);
1930 :
1931 482 : return rslot;
1932 : }
1933 :
1934 824882 : return NULL;
1935 : }
1936 :
1937 : /*
1938 : * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
1939 : *
1940 : * This works by first deleting the old tuple from the current partition,
1941 : * followed by inserting the new tuple into the root parent table, that is,
1942 : * mtstate->rootResultRelInfo. It will be re-routed from there to the
1943 : * correct partition.
1944 : *
1945 : * Returns true if the tuple has been successfully moved, or if it's found
1946 : * that the tuple was concurrently deleted so there's nothing more to do
1947 : * for the caller.
1948 : *
1949 : * False is returned if the tuple we're trying to move is found to have been
1950 : * concurrently updated. In that case, the caller must check if the updated
1951 : * tuple that's returned in *retry_slot still needs to be re-routed, and call
1952 : * this function again or perform a regular update accordingly. For MERGE,
1953 : * the updated tuple is not returned in *retry_slot; it has its own retry
1954 : * logic.
1955 : */
1956 : static bool
1957 560 : ExecCrossPartitionUpdate(ModifyTableContext *context,
1958 : ResultRelInfo *resultRelInfo,
1959 : ItemPointer tupleid, HeapTuple oldtuple,
1960 : TupleTableSlot *slot,
1961 : bool canSetTag,
1962 : UpdateContext *updateCxt,
1963 : TM_Result *tmresult,
1964 : TupleTableSlot **retry_slot,
1965 : TupleTableSlot **inserted_tuple,
1966 : ResultRelInfo **insert_destrel)
1967 : {
1968 560 : ModifyTableState *mtstate = context->mtstate;
1969 560 : EState *estate = mtstate->ps.state;
1970 : TupleConversionMap *tupconv_map;
1971 : bool tuple_deleted; /* set by ExecDelete(): true iff the old row was actually deleted */
1972 560 : TupleTableSlot *epqslot = NULL;
1973 :
1974 560 : context->cpDeletedSlot = NULL;
1975 560 : context->cpUpdateReturningSlot = NULL;
1976 560 : *retry_slot = NULL;
1977 :
1978 : /*
1979 : * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
1980 : * to migrate to a different partition. Maybe this can be implemented
1981 : * some day, but it seems a fringe feature with little redeeming value.
1982 : */
1983 560 : if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
1984 0 : ereport(ERROR,
1985 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1986 : errmsg("invalid ON UPDATE specification"),
1987 : errdetail("The result tuple would appear in a different partition than the original tuple.")));
1988 :
1989 : /*
1990 : * When an UPDATE is run directly on a leaf partition, simply fail with a
1991 : * partition constraint violation error.
1992 : */
1993 560 : if (resultRelInfo == mtstate->rootResultRelInfo)
1994 24 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1995 :
1996 : /* Initialize tuple routing info if not already done. */
1997 536 : if (mtstate->mt_partition_tuple_routing == NULL)
1998 : {
1999 343 : Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
2000 : MemoryContext oldcxt;
2001 :
2002 : /* Things built here have to last for the query duration. */
2003 343 : oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
2004 :
2005 343 : mtstate->mt_partition_tuple_routing =
2006 343 : ExecSetupPartitionTupleRouting(estate, rootRel);
2007 :
2008 : /*
2009 : * Before a partition's tuple can be re-routed, it must first be
2010 : * converted to the root's format, so we'll need a slot for storing
2011 : * such tuples.
2012 : */
2013 : Assert(mtstate->mt_root_tuple_slot == NULL);
2014 343 : mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
2015 :
2016 343 : MemoryContextSwitchTo(oldcxt);
2017 : }
2018 :
2019 : /*
2020 : * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
2021 : * We want to return rows from INSERT.
2022 : */
2023 536 : ExecDelete(context, resultRelInfo,
2024 : tupleid, oldtuple,
2025 : false, /* processReturning */
2026 : true, /* changingPart */
2027 : false, /* canSetTag */
2028 : tmresult, &tuple_deleted, &epqslot);
2029 :
2030 : /*
2031 : * For some reason if DELETE didn't happen (e.g. trigger prevented it, or
2032 : * it was already deleted by self, or it was concurrently deleted by
2033 : * another transaction), then we should skip the insert as well;
2034 : * otherwise, an UPDATE could cause an increase in the total number of
2035 : * rows across all partitions, which is clearly wrong.
2036 : *
2037 : * For a normal UPDATE, the case where the tuple has been the subject of a
2038 : * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
2039 : * machinery, but for an UPDATE that we've translated into a DELETE from
2040 : * this partition and an INSERT into some other partition, that's not
2041 : * available, because CTID chains can't span relation boundaries. We
2042 : * mimic the semantics to a limited extent by skipping the INSERT if the
2043 : * DELETE fails to find a tuple. This ensures that two concurrent
2044 : * attempts to UPDATE the same tuple at the same time can't turn one tuple
2045 : * into two, and that an UPDATE of a just-deleted tuple can't resurrect
2046 : * it.
2047 : */
2048 533 : if (!tuple_deleted)
2049 : {
2050 : /*
2051 : * epqslot will be typically NULL. But when ExecDelete() finds that
2052 : * another transaction has concurrently updated the same row, it
2053 : * re-fetches the row, skips the delete, and epqslot is set to the
2054 : * re-fetched tuple slot. In that case, we need to do all the checks
2055 : * again. For MERGE, we leave everything to the caller (it must do
2056 : * additional rechecking, and might end up executing a different
2057 : * action entirely).
2058 : */
2059 40 : if (mtstate->operation == CMD_MERGE)
2060 19 : return *tmresult == TM_Ok;
2061 21 : else if (TupIsNull(epqslot))
2062 18 : return true;
2063 : else
2064 : {
2065 : /* Fetch the most recent version of old tuple. */
2066 : TupleTableSlot *oldSlot;
2067 :
2068 : /* ... but first, make sure ri_oldTupleSlot is initialized. */
2069 3 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2070 0 : ExecInitUpdateProjection(mtstate, resultRelInfo);
2071 3 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2072 3 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
2073 : tupleid,
2074 : SnapshotAny,
2075 : oldSlot))
2076 0 : elog(ERROR, "failed to fetch tuple being updated");
2077 : /* and project the new tuple to retry the UPDATE with */
2078 3 : *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
2079 : oldSlot);
2080 3 : return false;
2081 : }
2082 : }
2083 :
2084 : /*
2085 : * resultRelInfo is one of the per-relation resultRelInfos. So we should
2086 : * convert the tuple into root's tuple descriptor if needed, since
2087 : * ExecInsert() starts the search from root.
2088 : */
2089 493 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
2090 493 : if (tupconv_map != NULL)
2091 158 : slot = execute_attr_map_slot(tupconv_map->attrMap,
2092 : slot,
2093 : mtstate->mt_root_tuple_slot);
2094 :
2095 : /* Tuple routing starts from the root table. */
2096 429 : context->cpUpdateReturningSlot =
2097 493 : ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
2098 : inserted_tuple, insert_destrel);
2099 :
2100 : /*
2101 : * Reset the transition state that may possibly have been written by
2102 : * INSERT.
2103 : */
2104 429 : if (mtstate->mt_transition_capture)
2105 27 : mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
2106 :
2107 : /* We're done moving. */
2108 429 : return true;
2109 : }
2110 :
2111 : /*
2112 : * ExecUpdatePrologue -- subroutine for ExecUpdate
2113 : *
2114 : * Prepare executor state for UPDATE. This includes running BEFORE ROW
2115 : * triggers. We return false if one of them makes the update a no-op;
2116 : * otherwise, return true.
2117 : */
2118 : static bool
2119 163689 : ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2120 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2121 : TM_Result *result)
2122 : {
2123 163689 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2124 :
2125 163689 : if (result)
2126 1096 : *result = TM_Ok; /* default for callers that inspect it; a BEFORE trigger may overwrite */
2127 :
2128 163689 : ExecMaterializeSlot(slot);
2129 :
2130 : /*
2131 : * Open the table's indexes, if we have not done so already, so that we
2132 : * can add new index entries for the updated tuple.
2133 : */
2134 163689 : if (resultRelationDesc->rd_rel->relhasindex &&
2135 118137 : resultRelInfo->ri_IndexRelationDescs == NULL)
2136 4787 : ExecOpenIndices(resultRelInfo, false);
2137 :
2138 : /* BEFORE ROW UPDATE triggers */
2139 163689 : if (resultRelInfo->ri_TrigDesc &&
2140 3173 : resultRelInfo->ri_TrigDesc->trig_update_before_row)
2141 : {
2142 : /* Flush any pending inserts, so rows are visible to the triggers */
2143 1305 : if (context->estate->es_insert_pending_result_relations != NIL)
2144 1 : ExecPendingInserts(context->estate);
2145 :
2146 1293 : return ExecBRUpdateTriggers(context->estate, context->epqstate,
2147 : resultRelInfo, tupleid, oldtuple, slot,
2148 : result, &context->tmfd,
2149 1305 : context->mtstate->operation == CMD_MERGE);
2150 : }
2151 :
2152 162384 : return true;
2153 : }
2154 :
2155 : /*
2156 : * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
2157 : *
2158 : * Apply the final modifications to the tuple slot before the update.
2159 : * (This is split out because we also need it in the foreign-table code path.)
2160 : */
2161 : static void
2162 163555 : ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
2163 : TupleTableSlot *slot,
2164 : EState *estate)
2165 : {
2166 163555 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2167 :
2168 : /*
2169 : * Constraints and GENERATED expressions might reference the tableoid
2170 : * column, so (re-)initialize tts_tableOid before evaluating them.
2171 : */
2172 163555 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2173 :
2174 : /*
2175 : * Compute stored generated columns, so that the new tuple carries
2176 : * up-to-date values for them.
2177 : */
2178 163555 : if (resultRelationDesc->rd_att->constr &&
2179 99702 : resultRelationDesc->rd_att->constr->has_generated_stored)
2180 144 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
2181 : CMD_UPDATE);
2182 163555 : }
2182 :
2183 : /*
2184 : * ExecUpdateAct -- subroutine for ExecUpdate
2185 : *
2186 : * Actually update the tuple, when operating on a plain table. If the
2187 : * table is a partition, and the command was called referencing an ancestor
2188 : * partitioned table, this routine migrates the resulting tuple to another
2189 : * partition.
2190 : *
2191 : * The caller is in charge of keeping indexes current as necessary. The
2192 : * caller is also in charge of doing EvalPlanQual if the tuple is found to
2193 : * be concurrently updated. However, in case of a cross-partition update,
2194 : * this routine does it.
2195 : */
2196 : static TM_Result
2197 163457 : ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2198 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2199 : bool canSetTag, UpdateContext *updateCxt)
2200 : {
2201 163457 : EState *estate = context->estate;
2202 163457 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2203 : bool partition_constraint_failed;
2204 : TM_Result result;
2205 :
2206 163457 : updateCxt->crossPartUpdate = false;
2207 :
2208 : /*
2209 : * If we move the tuple to a new partition, we loop back here to recompute
2210 : * GENERATED values (which are allowed to be different across partitions)
2211 : * and recheck any RLS policies and constraints. We do not fire any
2212 : * BEFORE triggers of the new partition, however.
2213 : */
2214 163460 : lreplace:
2215 : /* Fill in GENERATEd columns */
2216 163460 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2217 :
2218 : /* ensure slot is independent, consider e.g. EPQ */
2219 163460 : ExecMaterializeSlot(slot);
2220 :
2221 : /*
2222 : * If partition constraint fails, this row might get moved to another
2223 : * partition, in which case we should check the RLS CHECK policy just
2224 : * before inserting into the new partition, rather than doing it here.
2225 : * This is because a trigger on that partition might again change the row.
2226 : * So skip the WCO checks if the partition constraint fails.
2227 : */
2228 163460 : partition_constraint_failed =
2229 164853 : resultRelationDesc->rd_rel->relispartition &&
2230 1393 : !ExecPartitionCheck(resultRelInfo, slot, estate, false);
2231 :
2232 : /* Check any RLS UPDATE WITH CHECK policies */
2233 163460 : if (!partition_constraint_failed &&
2234 162900 : resultRelInfo->ri_WithCheckOptions != NIL)
2235 : {
2236 : /*
2237 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
2238 : * we are looking for at this point.
2239 : */
2240 267 : ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
2241 : resultRelInfo, slot, estate);
2242 : }
2243 :
2244 : /*
2245 : * If a partition check failed, try to move the row into the right
2246 : * partition.
2247 : */
2248 163433 : if (partition_constraint_failed)
2249 : {
2250 : TupleTableSlot *inserted_tuple,
2251 : *retry_slot;
2252 560 : ResultRelInfo *insert_destrel = NULL;
2253 :
2254 : /*
2255 : * ExecCrossPartitionUpdate will first DELETE the row from the
2256 : * partition it's currently in and then insert it back into the root
2257 : * table, which will re-route it to the correct partition. However,
2258 : * if the tuple has been concurrently updated, a retry is needed.
2259 : */
2260 560 : if (ExecCrossPartitionUpdate(context, resultRelInfo,
2261 : tupleid, oldtuple, slot,
2262 : canSetTag, updateCxt,
2263 : &result,
2264 : &retry_slot,
2265 : &inserted_tuple,
2266 : &insert_destrel))
2267 : {
2268 : /* success! */
2269 459 : updateCxt->crossPartUpdate = true;
2270 :
2271 : /*
2272 : * If the partitioned table being updated is referenced in foreign
2273 : * keys, queue up trigger events to check that none of them were
2274 : * violated. No special treatment is needed in
2275 : * non-cross-partition update situations, because the leaf
2276 : * partition's AR update triggers will take care of that. During
2277 : * cross-partition updates implemented as delete on the source
2278 : * partition followed by insert on the destination partition,
2279 : * AR-UPDATE triggers of the root table (that is, the table
2280 : * mentioned in the query) must be fired.
2281 : *
2282 : * NULL insert_destrel means that the move failed to occur, that
2283 : * is, the update failed, so no need to do anything in that case.
2284 : */
2285 459 : if (insert_destrel &&
2286 415 : resultRelInfo->ri_TrigDesc &&
2287 184 : resultRelInfo->ri_TrigDesc->trig_update_after_row)
2288 153 : ExecCrossPartitionUpdateForeignKey(context,
2289 : resultRelInfo,
2290 : insert_destrel,
2291 : tupleid, slot,
2292 : inserted_tuple);
2293 :
2294 463 : return TM_Ok;
2295 : }
2296 :
2297 : /*
2298 : * No luck, a retry is needed. If running MERGE, we do not do so
2299 : * here; instead let it handle that on its own rules.
2300 : */
2301 10 : if (context->mtstate->operation == CMD_MERGE)
2302 7 : return result;
2303 :
2304 : /*
2305 : * ExecCrossPartitionUpdate installed an updated version of the new
2306 : * tuple in the retry slot; start over.
2307 : */
2308 3 : slot = retry_slot;
2309 3 : goto lreplace;
2310 : }
2311 :
2312 : /*
2313 : * Check the constraints of the tuple. We've already checked the
2314 : * partition constraint above; however, we must still ensure the tuple
2315 : * passes all other constraints, so we will call ExecConstraints() and
2316 : * have it validate all remaining checks.
2317 : */
2318 162873 : if (resultRelationDesc->rd_att->constr)
2319 99388 : ExecConstraints(resultRelInfo, slot, estate);
2320 :
2321 : /*
2322 : * replace the heap tuple
2323 : *
2324 : * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
2325 : * the row to be updated is visible to that snapshot, and throw a
2326 : * can't-serialize error if not. This is a special-case behavior needed
2327 : * for referential integrity updates in transaction-snapshot mode
2328 : * transactions.
2329 : */
2330 162836 : result = table_tuple_update(resultRelationDesc, tupleid, slot,
2331 : estate->es_output_cid,
2332 : estate->es_snapshot,
2333 : estate->es_crosscheck_snapshot,
2334 : true /* wait for commit */ ,
2335 : &context->tmfd, &updateCxt->lockmode,
2336 : &updateCxt->updateIndexes);
2337 :
2338 162824 : return result;
2339 : }
2340 :
2341 : /*
2342 : * ExecUpdateEpilogue -- subroutine for ExecUpdate
2343 : *
2344 : * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2345 : * returns indicating that the tuple was updated.
2346 : */
2347 : static void
2348 162842 : ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
2349 : ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2350 : HeapTuple oldtuple, TupleTableSlot *slot)
2351 : {
2352 162842 : ModifyTableState *mtstate = context->mtstate;
2353 162842 : List *recheckIndexes = NIL;
2354 :
2355 : /* insert index entries for tuple if necessary */
2356 162842 : if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
2357 : {
2358 89251 : bits32 flags = EIIT_IS_UPDATE;
2359 :
2360 89251 : if (updateCxt->updateIndexes == TU_Summarizing)
2361 1641 : flags |= EIIT_ONLY_SUMMARIZING; /* only summarizing indexes need new entries */
2362 89251 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo, context->estate,
2363 : flags, slot, NIL,
2364 : NULL);
2365 : }
2366 :
2367 : /* AFTER ROW UPDATE Triggers */
2368 162796 : ExecARUpdateTriggers(context->estate, resultRelInfo,
2369 : NULL, NULL,
2370 : tupleid, oldtuple, slot,
2371 : recheckIndexes,
2372 162796 : mtstate->operation == CMD_INSERT ?
2373 : mtstate->mt_oc_transition_capture :
2374 : mtstate->mt_transition_capture,
2375 : false);
2376 :
2377 162794 : list_free(recheckIndexes);
2378 :
2379 : /*
2380 : * Check any WITH CHECK OPTION constraints from parent views. We are
2381 : * required to do this after testing all constraints and uniqueness
2382 : * violations per the SQL spec, so we do it after actually updating the
2383 : * record in the heap and all indexes.
2384 : *
2385 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2386 : * are looking for at this point.
2387 : */
2388 162794 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2389 254 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2390 : slot, context->estate);
2391 162753 : }
2392 :
2393 : /*
2394 : * Queues up an update event using the target root partitioned table's
2395 : * trigger to check that a cross-partition update hasn't broken any foreign
2396 : * keys pointing into it.
2397 : */
2398 : static void
2399 153 : ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
2400 : ResultRelInfo *sourcePartInfo,
2401 : ResultRelInfo *destPartInfo,
2402 : ItemPointer tupleid,
2403 : TupleTableSlot *oldslot,
2404 : TupleTableSlot *newslot)
2405 : {
2406 : ListCell *lc;
2407 : ResultRelInfo *rootRelInfo;
2408 : List *ancestorRels;
2409 :
2410 153 : rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2411 153 : ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2412 :
2413 : /*
2414 : * For any foreign keys that point directly into a non-root ancestors of
2415 : * the source partition, we can in theory fire an update event to enforce
2416 : * those constraints using their triggers, if we could tell that both the
2417 : * source and the destination partitions are under the same ancestor. But
2418 : * for now, we simply report an error that those cannot be enforced.
2419 : */
2420 333 : foreach(lc, ancestorRels)
2421 : {
2422 183 : ResultRelInfo *rInfo = lfirst(lc);
2423 183 : TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2424 183 : bool has_noncloned_fkey = false; /* does an FK reference this non-root rel directly? */
2425 :
2426 : /* Root ancestor's triggers will be processed. */
2427 183 : if (rInfo == rootRelInfo)
2428 150 : continue;
2429 :
2430 33 : if (trigdesc && trigdesc->trig_update_after_row)
2431 : {
2432 114 : for (int i = 0; i < trigdesc->numtriggers; i++)
2433 : {
2434 84 : Trigger *trig = &trigdesc->triggers[i];
2435 :
2436 87 : if (!trig->tgisclone &&
2437 3 : RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
2438 : {
2439 3 : has_noncloned_fkey = true;
2440 3 : break;
2441 : }
2442 : }
2443 : }
2444 :
2445 33 : if (has_noncloned_fkey)
2446 3 : ereport(ERROR,
2447 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2448 : errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2449 : errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2450 : RelationGetRelationName(rInfo->ri_RelationDesc),
2451 : RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
2452 : errhint("Consider defining the foreign key on table \"%s\".",
2453 : RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
2454 : }
2455 :
2456 : /* Perform the root table's triggers. */
2457 150 : ExecARUpdateTriggers(context->estate,
2458 : rootRelInfo, sourcePartInfo, destPartInfo,
2459 : tupleid, NULL, newslot, NIL, NULL, true);
2460 150 : }
2461 :
2462 : /* ----------------------------------------------------------------
2463 : * ExecUpdate
2464 : *
2465 : * note: we can't run UPDATE queries with transactions
2466 : * off because UPDATEs are actually INSERTs and our
2467 : * scan will mistakenly loop forever, updating the tuple
2468 : * it just inserted.. This should be fixed but until it
2469 : * is, we don't want to get stuck in an infinite loop
2470 : * which corrupts your database..
2471 : *
2472 : * When updating a table, tupleid identifies the tuple to update and
2473 : * oldtuple is NULL. When updating through a view INSTEAD OF trigger,
2474 : * oldtuple is passed to the triggers and identifies what to update, and
2475 : * tupleid is invalid. When updating a foreign table, tupleid is
2476 : * invalid; the FDW has to figure out which row to update using data from
2477 : * the planSlot. oldtuple is passed to foreign table triggers; it is
2478 : * NULL when the foreign table has no relevant triggers.
2479 : *
2480 : * oldSlot contains the old tuple value.
2481 : * slot contains the new tuple value to be stored.
2482 : * planSlot is the output of the ModifyTable's subplan; we use it
2483 : * to access values from other input tables (for RETURNING),
2484 : * row-ID junk columns, etc.
2485 : *
2486 : * Returns RETURNING result if any, otherwise NULL. On exit, if tupleid
2487 : * had identified the tuple to update, it will identify the tuple
2488 : * actually updated after EvalPlanQual.
2489 : * ----------------------------------------------------------------
2490 : */
2491 : static TupleTableSlot *
2492 162593 : ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2493 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *oldSlot,
2494 : TupleTableSlot *slot, bool canSetTag)
2495 : {
2496 162593 : EState *estate = context->estate;
2497 162593 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2498 162593 : UpdateContext updateCxt = {0};
2499 : TM_Result result;
2500 :
2501 : /*
2502 : * abort the operation if not running transactions
2503 : */
2504 162593 : if (IsBootstrapProcessingMode())
2505 0 : elog(ERROR, "cannot UPDATE during bootstrap");
2506 :
2507 : /*
2508 : * Prepare for the update. This includes BEFORE ROW triggers, so we're
2509 : * done if it says we are.
2510 : */
2511 162593 : if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
2512 66 : return NULL;
2513 :
2514 : /* INSTEAD OF ROW UPDATE Triggers */
2515 162515 : if (resultRelInfo->ri_TrigDesc &&
2516 2911 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2517 : {
2518 63 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2519 : oldtuple, slot))
2520 9 : return NULL; /* "do nothing" */
2521 : }
2522 162452 : else if (resultRelInfo->ri_FdwRoutine)
2523 : {
2524 : /* Fill in GENERATEd columns */
2525 95 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2526 :
2527 : /*
2528 : * update in foreign table: let the FDW do it
2529 : */
2530 95 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2531 : resultRelInfo,
2532 : slot,
2533 : context->planSlot);
2534 :
2535 95 : if (slot == NULL) /* "do nothing" */
2536 1 : return NULL;
2537 :
2538 : /*
2539 : * AFTER ROW Triggers or RETURNING expressions might reference the
2540 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
2541 : * them. (This covers the case where the FDW replaced the slot.)
2542 : */
2543 94 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2544 : }
2545 : else
2546 : {
2547 : ItemPointerData lockedtid;
2548 :
2549 : /*
2550 : * If we generate a new candidate tuple after EvalPlanQual testing, we
2551 : * must loop back here to try again. (We don't need to redo triggers,
2552 : * however. If there are any BEFORE triggers then trigger.c will have
2553 : * done table_tuple_lock to lock the correct tuple, so there's no need
2554 : * to do them again.)
2555 : */
2556 162357 : redo_act:
2557 162410 : lockedtid = *tupleid;
2558 162410 : result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2559 : canSetTag, &updateCxt);
2560 :
2561 : /*
2562 : * If ExecUpdateAct reports that a cross-partition update was done,
2563 : * then the RETURNING tuple (if any) has been projected and there's
2564 : * nothing else for us to do.
2565 : */
2566 162252 : if (updateCxt.crossPartUpdate)
2567 453 : return context->cpUpdateReturningSlot;
2568 :
2569 161865 : switch (result)
2570 : {
2571 42 : case TM_SelfModified:
2572 :
2573 : /*
2574 : * The target tuple was already updated or deleted by the
2575 : * current command, or by a later command in the current
2576 : * transaction. The former case is possible in a join UPDATE
2577 : * where multiple tuples join to the same target tuple. This
2578 : * is pretty questionable, but Postgres has always allowed it:
2579 : * we just execute the first update action and ignore
2580 : * additional update attempts.
2581 : *
2582 : * The latter case arises if the tuple is modified by a
2583 : * command in a BEFORE trigger, or perhaps by a command in a
2584 : * volatile function used in the query. In such situations we
2585 : * should not ignore the update, but it is equally unsafe to
2586 : * proceed. We don't want to discard the original UPDATE
2587 : * while keeping the triggered actions based on it; and we
2588 : * have no principled way to merge this update with the
2589 : * previous ones. So throwing an error is the only safe
2590 : * course.
2591 : *
2592 : * If a trigger actually intends this type of interaction, it
2593 : * can re-execute the UPDATE (assuming it can figure out how)
2594 : * and then return NULL to cancel the outer update.
2595 : */
2596 42 : if (context->tmfd.cmax != estate->es_output_cid)
2597 3 : ereport(ERROR,
2598 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2599 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2600 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2601 :
2602 : /* Else, already updated by self; nothing to do */
2603 39 : return NULL;
2604 :
2605 161738 : case TM_Ok:
2606 161738 : break;
2607 :
2608 81 : case TM_Updated:
2609 : {
2610 : TupleTableSlot *inputslot;
2611 : TupleTableSlot *epqslot;
2612 :
2613 81 : if (IsolationUsesXactSnapshot())
2614 2 : ereport(ERROR,
2615 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2616 : errmsg("could not serialize access due to concurrent update")));
2617 :
2618 : /*
2619 : * Already know that we're going to need to do EPQ, so
2620 : * fetch tuple directly into the right slot.
2621 : */
2622 79 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2623 : resultRelInfo->ri_RangeTableIndex);
2624 :
2625 79 : result = table_tuple_lock(resultRelationDesc, tupleid,
2626 : estate->es_snapshot,
2627 : inputslot, estate->es_output_cid,
2628 : updateCxt.lockmode, LockWaitBlock,
2629 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2630 : &context->tmfd);
2631 :
2632 77 : switch (result)
2633 : {
2634 72 : case TM_Ok:
2635 : Assert(context->tmfd.traversed);
2636 :
2637 72 : epqslot = EvalPlanQual(context->epqstate,
2638 : resultRelationDesc,
2639 : resultRelInfo->ri_RangeTableIndex,
2640 : inputslot);
2641 72 : if (TupIsNull(epqslot))
2642 : /* Tuple not passing quals anymore, exiting... */
2643 19 : return NULL;
2644 :
2645 : /* Make sure ri_oldTupleSlot is initialized. */
2646 53 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2647 0 : ExecInitUpdateProjection(context->mtstate,
2648 : resultRelInfo);
2649 :
2650 53 : if (resultRelInfo->ri_needLockTagTuple)
2651 : {
2652 1 : UnlockTuple(resultRelationDesc,
2653 : &lockedtid, InplaceUpdateTupleLock);
2654 1 : LockTuple(resultRelationDesc,
2655 : tupleid, InplaceUpdateTupleLock);
2656 : }
2657 :
2658 : /* Fetch the most recent version of old tuple. */
2659 53 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2660 53 : if (!table_tuple_fetch_row_version(resultRelationDesc,
2661 : tupleid,
2662 : SnapshotAny,
2663 : oldSlot))
2664 0 : elog(ERROR, "failed to fetch tuple being updated");
2665 53 : slot = ExecGetUpdateNewTuple(resultRelInfo,
2666 : epqslot, oldSlot);
2667 53 : goto redo_act;
2668 :
2669 1 : case TM_Deleted:
2670 : /* tuple already deleted; nothing to do */
2671 1 : return NULL;
2672 :
2673 4 : case TM_SelfModified:
2674 :
2675 : /*
2676 : * This can be reached when following an update
2677 : * chain from a tuple updated by another session,
2678 : * reaching a tuple that was already updated in
2679 : * this transaction. If previously modified by
2680 : * this command, ignore the redundant update,
2681 : * otherwise error out.
2682 : *
2683 : * See also TM_SelfModified response to
2684 : * table_tuple_update() above.
2685 : */
2686 4 : if (context->tmfd.cmax != estate->es_output_cid)
2687 1 : ereport(ERROR,
2688 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2689 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2690 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2691 3 : return NULL;
2692 :
2693 0 : default:
2694 : /* see table_tuple_lock call in ExecDelete() */
2695 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2696 : result);
2697 : return NULL;
2698 : }
2699 : }
2700 :
2701 : break;
2702 :
2703 4 : case TM_Deleted:
2704 4 : if (IsolationUsesXactSnapshot())
2705 0 : ereport(ERROR,
2706 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2707 : errmsg("could not serialize access due to concurrent delete")));
2708 : /* tuple already deleted; nothing to do */
2709 4 : return NULL;
2710 :
2711 0 : default:
2712 0 : elog(ERROR, "unrecognized table_tuple_update status: %u",
2713 : result);
2714 : return NULL;
2715 : }
2716 : }
2717 :
2718 161880 : if (canSetTag)
2719 161571 : (estate->es_processed)++;
2720 :
2721 161880 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2722 : slot);
2723 :
2724 : /* Process RETURNING if present */
2725 161797 : if (resultRelInfo->ri_projectReturning)
2726 1215 : return ExecProcessReturning(context, resultRelInfo, false,
2727 : oldSlot, slot, context->planSlot);
2728 :
2729 160582 : return NULL;
2730 : }
2731 :
/*
 * ExecOnConflictLockRow --- lock the row for ON CONFLICT DO SELECT/UPDATE
 *
 * Try to lock tuple for update as part of speculative insertion for ON
 * CONFLICT DO UPDATE or ON CONFLICT DO SELECT FOR UPDATE/SHARE.
 *
 * 'existing' is the slot the locked tuple version is fetched into; if we
 * return false it has been cleared.  'conflictTid' identifies the
 * conflicting row in 'relation', and 'lockmode' is the tuple-lock strength
 * to acquire.  'isUpdate' only affects which command name is reported in
 * the "cannot affect row a second time" error below.
 *
 * Returns true if the row is successfully locked, or false if the caller must
 * retry the INSERT from scratch.
 */
static bool
ExecOnConflictLockRow(ModifyTableContext *context,
					  TupleTableSlot *existing,
					  ItemPointer conflictTid,
					  Relation relation,
					  LockTupleMode lockmode,
					  bool isUpdate)
{
	TM_FailureData tmfd;
	TM_Result	test;
	Datum		xminDatum;
	TransactionId xmin;
	bool		isnull;

	/*
	 * Lock tuple with lockmode.  Don't follow updates when tuple cannot be
	 * locked without doing so.  A row locking conflict here means our
	 * previous conclusion that the tuple is conclusively committed is not
	 * true anymore.
	 */
	test = table_tuple_lock(relation, conflictTid,
							context->estate->es_snapshot,
							existing, context->estate->es_output_cid,
							lockmode, LockWaitBlock, 0,
							&tmfd);
	switch (test)
	{
		case TM_Ok:
			/* success! The tuple is now stored and locked in 'existing'. */
			break;

		case TM_Invisible:

			/*
			 * This can occur when a just inserted tuple is updated again in
			 * the same command. E.g. because multiple rows with the same
			 * conflicting key values are inserted.
			 *
			 * This is somewhat similar to the ExecUpdate() TM_SelfModified
			 * case.  We do not want to proceed because it would lead to the
			 * same row being updated a second time in some unspecified order,
			 * and in contrast to plain UPDATEs there's no historical behavior
			 * to break.
			 *
			 * It is the user's responsibility to prevent this situation from
			 * occurring.  These problems are why the SQL standard similarly
			 * specifies that for SQL MERGE, an exception must be raised in
			 * the event of an attempt to update the same row twice.
			 */
			xminDatum = slot_getsysattr(existing,
										MinTransactionIdAttributeNumber,
										&isnull);
			Assert(!isnull);
			xmin = DatumGetTransactionId(xminDatum);

			/* Raise the user-facing error only for our own insertion */
			if (TransactionIdIsCurrentTransactionId(xmin))
				ereport(ERROR,
						(errcode(ERRCODE_CARDINALITY_VIOLATION),
				/* translator: %s is a SQL command name */
						 errmsg("%s command cannot affect row a second time",
								isUpdate ? "ON CONFLICT DO UPDATE" : "ON CONFLICT DO SELECT"),
						 errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));

			/* This shouldn't happen */
			elog(ERROR, "attempted to lock invisible tuple");
			break;

		case TM_SelfModified:

			/*
			 * This state should never be reached. As a dirty snapshot is used
			 * to find conflicting tuples, speculative insertion wouldn't have
			 * seen this row to conflict with.
			 */
			elog(ERROR, "unexpected self-updated tuple");
			break;

		case TM_Updated:
			if (IsolationUsesXactSnapshot())
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent update")));

			/*
			 * Tell caller to try again from the very start.
			 *
			 * It does not make sense to use the usual EvalPlanQual() style
			 * loop here, as the new version of the row might not conflict
			 * anymore, or the conflicting tuple has actually been deleted.
			 */
			ExecClearTuple(existing);
			return false;

		case TM_Deleted:
			if (IsolationUsesXactSnapshot())
				ereport(ERROR,
						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
						 errmsg("could not serialize access due to concurrent delete")));

			/* see TM_Updated case */
			ExecClearTuple(existing);
			return false;

		default:
			elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
	}

	/* Success, the tuple is locked. */
	return true;
}
2851 :
/*
 * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
 *
 * Try to lock tuple for update as part of speculative insertion.  If
 * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
 * (but still lock row, even though it may not satisfy estate's
 * snapshot).
 *
 * 'conflictTid' identifies the existing conflicting row; 'excludedSlot'
 * holds the tuple proposed for insertion (the EXCLUDED pseudo-relation).
 * On success, '*returning' is set to the RETURNING result slot from the
 * nested ExecUpdate() call, or NULL if there is none.
 *
 * Returns true if we're done (with or without an update), or false if
 * the caller must retry the INSERT from scratch.
 */
static bool
ExecOnConflictUpdate(ModifyTableContext *context,
					 ResultRelInfo *resultRelInfo,
					 ItemPointer conflictTid,
					 TupleTableSlot *excludedSlot,
					 bool canSetTag,
					 TupleTableSlot **returning)
{
	ModifyTableState *mtstate = context->mtstate;
	ExprContext *econtext = mtstate->ps.ps_ExprContext;
	Relation	relation = resultRelInfo->ri_RelationDesc;
	ExprState  *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
	TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
	LockTupleMode lockmode;

	/*
	 * Parse analysis should have blocked ON CONFLICT for all system
	 * relations, which includes these.  There's no fundamental obstacle to
	 * supporting this; we'd just need to handle LOCKTAG_TUPLE like the other
	 * ExecUpdate() caller.
	 */
	Assert(!resultRelInfo->ri_needLockTagTuple);

	/* Determine lock mode to use */
	lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);

	/* Lock tuple for update; on failure, caller restarts the INSERT */
	if (!ExecOnConflictLockRow(context, existing, conflictTid,
							   resultRelInfo->ri_RelationDesc, lockmode, true))
		return false;

	/*
	 * Verify that the tuple is visible to our MVCC snapshot if the current
	 * isolation level mandates that.
	 *
	 * It's not sufficient to rely on the check within ExecUpdate() as e.g.
	 * CONFLICT ... WHERE clause may prevent us from reaching that.
	 *
	 * This means we only ever continue when a new command in the current
	 * transaction could see the row, even though in READ COMMITTED mode the
	 * tuple will not be visible according to the current statement's
	 * snapshot.  This is in line with the way UPDATE deals with newer tuple
	 * versions.
	 */
	ExecCheckTupleVisible(context->estate, relation, existing);

	/*
	 * Make tuple and any needed join variables available to ExecQual and
	 * ExecProject.  The EXCLUDED tuple is installed in ecxt_innertuple, while
	 * the target's existing tuple is installed in the scantuple.  EXCLUDED
	 * has been made to reference INNER_VAR in setrefs.c, but there is no
	 * other redirection.
	 */
	econtext->ecxt_scantuple = existing;
	econtext->ecxt_innertuple = excludedSlot;
	econtext->ecxt_outertuple = NULL;

	if (!ExecQual(onConflictSetWhere, econtext))
	{
		ExecClearTuple(existing);	/* see return below */
		InstrCountFiltered1(&mtstate->ps, 1);
		return true;			/* done with the tuple */
	}

	if (resultRelInfo->ri_WithCheckOptions != NIL)
	{
		/*
		 * Check target's existing tuple against UPDATE-applicable USING
		 * security barrier quals (if any), enforced here as RLS checks/WCOs.
		 *
		 * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
		 * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK.
		 * Since SELECT permission on the target table is always required for
		 * INSERT ... ON CONFLICT DO UPDATE, the rewriter also adds SELECT RLS
		 * checks/WCOs for SELECT security quals, using WCOs of the same kind,
		 * and this check enforces them too.
		 *
		 * The rewriter will also have associated UPDATE-applicable straight
		 * RLS checks/WCOs for the benefit of the ExecUpdate() call that
		 * follows.  INSERTs and UPDATEs naturally have mutually exclusive WCO
		 * kinds, so there is no danger of spurious over-enforcement in the
		 * INSERT or UPDATE path.
		 */
		ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
							 existing,
							 mtstate->ps.state);
	}

	/* Project the new tuple version */
	ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);

	/*
	 * Note that it is possible that the target tuple has been modified in
	 * this session, after the above table_tuple_lock. We choose to not error
	 * out in that case, in line with ExecUpdate's treatment of similar cases.
	 * This can happen if an UPDATE is triggered from within ExecQual(),
	 * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
	 * wCTE in the ON CONFLICT's SET.
	 */

	/* Execute UPDATE with projection */
	*returning = ExecUpdate(context, resultRelInfo,
							conflictTid, NULL, existing,
							resultRelInfo->ri_onConflict->oc_ProjSlot,
							canSetTag);

	/*
	 * Clear out existing tuple, as there might not be another conflict among
	 * the next input rows.  Don't want to hold resources till the end of the
	 * query.  First though, make sure that the returning slot, if any, has a
	 * local copy of any OLD pass-by-reference values, if it refers to any OLD
	 * columns.
	 */
	if (*returning != NULL &&
		resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
		ExecMaterializeSlot(*returning);

	ExecClearTuple(existing);

	return true;
}
2984 :
2985 : /*
2986 : * ExecOnConflictSelect --- execute SELECT of INSERT ON CONFLICT DO SELECT
2987 : *
2988 : * If SELECT FOR UPDATE/SHARE is specified, try to lock tuple as part of
2989 : * speculative insertion. If a qual originating from ON CONFLICT DO SELECT is
2990 : * satisfied, select (but still lock row, even though it may not satisfy
2991 : * estate's snapshot).
2992 : *
2993 : * Returns true if we're done (with or without a select), or false if the
2994 : * caller must retry the INSERT from scratch.
2995 : */
2996 : static bool
2997 147 : ExecOnConflictSelect(ModifyTableContext *context,
2998 : ResultRelInfo *resultRelInfo,
2999 : ItemPointer conflictTid,
3000 : TupleTableSlot *excludedSlot,
3001 : bool canSetTag,
3002 : TupleTableSlot **returning)
3003 : {
3004 147 : ModifyTableState *mtstate = context->mtstate;
3005 147 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3006 147 : Relation relation = resultRelInfo->ri_RelationDesc;
3007 147 : ExprState *onConflictSelectWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
3008 147 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
3009 147 : LockClauseStrength lockStrength = resultRelInfo->ri_onConflict->oc_LockStrength;
3010 :
3011 : /*
3012 : * Parse analysis should have blocked ON CONFLICT for all system
3013 : * relations, which includes these. There's no fundamental obstacle to
3014 : * supporting this; we'd just need to handle LOCKTAG_TUPLE appropriately.
3015 : */
3016 : Assert(!resultRelInfo->ri_needLockTagTuple);
3017 :
3018 : /* Fetch/lock existing tuple, according to the requested lock strength */
3019 147 : if (lockStrength == LCS_NONE)
3020 : {
3021 92 : if (!table_tuple_fetch_row_version(relation,
3022 : conflictTid,
3023 : SnapshotAny,
3024 : existing))
3025 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
3026 : }
3027 : else
3028 : {
3029 : LockTupleMode lockmode;
3030 :
3031 55 : switch (lockStrength)
3032 : {
3033 1 : case LCS_FORKEYSHARE:
3034 1 : lockmode = LockTupleKeyShare;
3035 1 : break;
3036 1 : case LCS_FORSHARE:
3037 1 : lockmode = LockTupleShare;
3038 1 : break;
3039 1 : case LCS_FORNOKEYUPDATE:
3040 1 : lockmode = LockTupleNoKeyExclusive;
3041 1 : break;
3042 52 : case LCS_FORUPDATE:
3043 52 : lockmode = LockTupleExclusive;
3044 52 : break;
3045 0 : default:
3046 0 : elog(ERROR, "Unexpected lock strength %d", (int) lockStrength);
3047 : }
3048 :
3049 55 : if (!ExecOnConflictLockRow(context, existing, conflictTid,
3050 : resultRelInfo->ri_RelationDesc, lockmode, false))
3051 0 : return false;
3052 : }
3053 :
3054 : /*
3055 : * Verify that the tuple is visible to our MVCC snapshot if the current
3056 : * isolation level mandates that. See comments in ExecOnConflictUpdate().
3057 : */
3058 138 : ExecCheckTupleVisible(context->estate, relation, existing);
3059 :
3060 : /*
3061 : * Make tuple and any needed join variables available to ExecQual. The
3062 : * EXCLUDED tuple is installed in ecxt_innertuple, while the target's
3063 : * existing tuple is installed in the scantuple. EXCLUDED has been made
3064 : * to reference INNER_VAR in setrefs.c, but there is no other redirection.
3065 : */
3066 138 : econtext->ecxt_scantuple = existing;
3067 138 : econtext->ecxt_innertuple = excludedSlot;
3068 138 : econtext->ecxt_outertuple = NULL;
3069 :
3070 138 : if (!ExecQual(onConflictSelectWhere, econtext))
3071 : {
3072 18 : ExecClearTuple(existing); /* see return below */
3073 18 : InstrCountFiltered1(&mtstate->ps, 1);
3074 18 : return true; /* done with the tuple */
3075 : }
3076 :
3077 120 : if (resultRelInfo->ri_WithCheckOptions != NIL)
3078 : {
3079 : /*
3080 : * Check target's existing tuple against SELECT-applicable USING
3081 : * security barrier quals (if any), enforced here as RLS checks/WCOs.
3082 : *
3083 : * The rewriter creates WCOs from the USING quals of SELECT policies,
3084 : * and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK. If FOR
3085 : * UPDATE/SHARE was specified, UPDATE permissions are required on the
3086 : * target table, and the rewriter also adds WCOs built from the USING
3087 : * quals of UPDATE policies, using WCOs of the same kind, and this
3088 : * check enforces them too.
3089 : */
3090 18 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
3091 : existing,
3092 : mtstate->ps.state);
3093 : }
3094 :
3095 : /* RETURNING is required for DO SELECT */
3096 : Assert(resultRelInfo->ri_projectReturning);
3097 :
3098 117 : *returning = ExecProcessReturning(context, resultRelInfo, false,
3099 : existing, existing, context->planSlot);
3100 :
3101 117 : if (canSetTag)
3102 117 : context->estate->es_processed++;
3103 :
3104 : /*
3105 : * Before releasing the existing tuple, make sure that the returning slot
3106 : * has a local copy of any pass-by-reference values.
3107 : */
3108 117 : ExecMaterializeSlot(*returning);
3109 :
3110 : /*
3111 : * Clear out existing tuple, as there might not be another conflict among
3112 : * the next input rows. Don't want to hold resources till the end of the
3113 : * query.
3114 : */
3115 117 : ExecClearTuple(existing);
3116 :
3117 117 : return true;
3118 : }
3119 :
3120 : /*
3121 : * Perform MERGE.
3122 : */
3123 : static TupleTableSlot *
3124 7456 : ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3125 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
3126 : {
3127 7456 : TupleTableSlot *rslot = NULL;
3128 : bool matched;
3129 :
3130 : /*-----
3131 : * If we are dealing with a WHEN MATCHED case, tupleid or oldtuple is
3132 : * valid, depending on whether the result relation is a table or a view.
3133 : * We execute the first action for which the additional WHEN MATCHED AND
3134 : * quals pass. If an action without quals is found, that action is
3135 : * executed.
3136 : *
3137 : * Similarly, in the WHEN NOT MATCHED BY SOURCE case, tupleid or oldtuple
3138 : * is valid, and we look at the given WHEN NOT MATCHED BY SOURCE actions
3139 : * in sequence until one passes. This is almost identical to the WHEN
3140 : * MATCHED case, and both cases are handled by ExecMergeMatched().
3141 : *
3142 : * Finally, in the WHEN NOT MATCHED [BY TARGET] case, both tupleid and
3143 : * oldtuple are invalid, and we look at the given WHEN NOT MATCHED [BY
3144 : * TARGET] actions in sequence until one passes.
3145 : *
3146 : * Things get interesting in case of concurrent update/delete of the
3147 : * target tuple. Such concurrent update/delete is detected while we are
3148 : * executing a WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action.
3149 : *
3150 : * A concurrent update can:
3151 : *
3152 : * 1. modify the target tuple so that the results from checking any
3153 : * additional quals attached to WHEN MATCHED or WHEN NOT MATCHED BY
3154 : * SOURCE actions potentially change, but the result from the join
3155 : * quals does not change.
3156 : *
3157 : * In this case, we are still dealing with the same kind of match
3158 : * (MATCHED or NOT MATCHED BY SOURCE). We recheck the same list of
3159 : * actions from the start and choose the first one that satisfies the
3160 : * new target tuple.
3161 : *
3162 : * 2. modify the target tuple in the WHEN MATCHED case so that the join
3163 : * quals no longer pass and hence the source and target tuples no
3164 : * longer match.
3165 : *
3166 : * In this case, we are now dealing with a NOT MATCHED case, and we
3167 : * process both WHEN NOT MATCHED BY SOURCE and WHEN NOT MATCHED [BY
3168 : * TARGET] actions. First ExecMergeMatched() processes the list of
3169 : * WHEN NOT MATCHED BY SOURCE actions in sequence until one passes,
3170 : * then ExecMergeNotMatched() processes any WHEN NOT MATCHED [BY
3171 : * TARGET] actions in sequence until one passes. Thus we may execute
3172 : * two actions; one of each kind.
3173 : *
3174 : * Thus we support concurrent updates that turn MATCHED candidate rows
3175 : * into NOT MATCHED rows. However, we do not attempt to support cases
3176 : * that would turn NOT MATCHED rows into MATCHED rows, or which would
3177 : * cause a target row to match a different source row.
3178 : *
3179 : * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED
3180 : * [BY TARGET].
3181 : *
3182 : * ExecMergeMatched() takes care of following the update chain and
3183 : * re-finding the qualifying WHEN MATCHED or WHEN NOT MATCHED BY SOURCE
3184 : * action, as long as the target tuple still exists. If the target tuple
3185 : * gets deleted or a concurrent update causes the join quals to fail, it
3186 : * returns a matched status of false and we call ExecMergeNotMatched().
3187 : * Given that ExecMergeMatched() always makes progress by following the
3188 : * update chain and we never switch from ExecMergeNotMatched() to
3189 : * ExecMergeMatched(), there is no risk of a livelock.
3190 : */
3191 7456 : matched = tupleid != NULL || oldtuple != NULL;
3192 7456 : if (matched)
3193 6110 : rslot = ExecMergeMatched(context, resultRelInfo, tupleid, oldtuple,
3194 : canSetTag, &matched);
3195 :
3196 : /*
3197 : * Deal with the NOT MATCHED case (either a NOT MATCHED tuple from the
3198 : * join, or a previously MATCHED tuple for which ExecMergeMatched() set
3199 : * "matched" to false, indicating that it no longer matches).
3200 : */
3201 7409 : if (!matched)
3202 : {
3203 : /*
3204 : * If a concurrent update turned a MATCHED case into a NOT MATCHED
3205 : * case, and we have both WHEN NOT MATCHED BY SOURCE and WHEN NOT
3206 : * MATCHED [BY TARGET] actions, and there is a RETURNING clause,
3207 : * ExecMergeMatched() may have already executed a WHEN NOT MATCHED BY
3208 : * SOURCE action, and computed the row to return. If so, we cannot
3209 : * execute a WHEN NOT MATCHED [BY TARGET] action now, so mark it as
3210 : * pending (to be processed on the next call to ExecModifyTable()).
3211 : * Otherwise, just process the action now.
3212 : */
3213 1355 : if (rslot == NULL)
3214 1353 : rslot = ExecMergeNotMatched(context, resultRelInfo, canSetTag);
3215 : else
3216 2 : context->mtstate->mt_merge_pending_not_matched = context->planSlot;
3217 : }
3218 :
3219 7379 : return rslot;
3220 : }
3221 :
3222 : /*
3223 : * Check and execute the first qualifying MATCHED or NOT MATCHED BY SOURCE
3224 : * action, depending on whether the join quals are satisfied. If the target
3225 : * relation is a table, the current target tuple is identified by tupleid.
3226 : * Otherwise, if the target relation is a view, oldtuple is the current target
3227 : * tuple from the view.
3228 : *
3229 : * We start from the first WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action
3230 : * and check if the WHEN quals pass, if any. If the WHEN quals for the first
3231 : * action do not pass, we check the second, then the third and so on. If we
3232 : * reach the end without finding a qualifying action, we return NULL.
3233 : * Otherwise, we execute the qualifying action and return its RETURNING
3234 : * result, if any, or NULL.
3235 : *
3236 : * On entry, "*matched" is assumed to be true. If a concurrent update or
3237 : * delete is detected that causes the join quals to no longer pass, we set it
3238 : * to false, indicating that the caller should process any NOT MATCHED [BY
3239 : * TARGET] actions.
3240 : *
3241 : * After a concurrent update, we restart from the first action to look for a
3242 : * new qualifying action to execute. If the join quals originally passed, and
3243 : * the concurrent update caused them to no longer pass, then we switch from
3244 : * the MATCHED to the NOT MATCHED BY SOURCE list of actions before restarting
3245 : * (and setting "*matched" to false). As a result we may execute a WHEN NOT
3246 : * MATCHED BY SOURCE action, and set "*matched" to false, causing the caller
3247 : * to also execute a WHEN NOT MATCHED [BY TARGET] action.
3248 : */
3249 : static TupleTableSlot *
3250 6110 : ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3251 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag,
3252 : bool *matched)
3253 : {
3254 6110 : ModifyTableState *mtstate = context->mtstate;
3255 6110 : List **mergeActions = resultRelInfo->ri_MergeActions;
3256 : ItemPointerData lockedtid;
3257 : List *actionStates;
3258 6110 : TupleTableSlot *newslot = NULL;
3259 6110 : TupleTableSlot *rslot = NULL;
3260 6110 : EState *estate = context->estate;
3261 6110 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3262 : bool isNull;
3263 6110 : EPQState *epqstate = &mtstate->mt_epqstate;
3264 : ListCell *l;
3265 :
3266 : /* Expect matched to be true on entry */
3267 : Assert(*matched);
3268 :
3269 : /*
3270 : * If there are no WHEN MATCHED or WHEN NOT MATCHED BY SOURCE actions, we
3271 : * are done.
3272 : */
3273 6110 : if (mergeActions[MERGE_WHEN_MATCHED] == NIL &&
3274 603 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] == NIL)
3275 267 : return NULL;
3276 :
3277 : /*
3278 : * Make tuple and any needed join variables available to ExecQual and
3279 : * ExecProject. The target's existing tuple is installed in the scantuple.
3280 : * This target relation's slot is required only in the case of a MATCHED
3281 : * or NOT MATCHED BY SOURCE tuple and UPDATE/DELETE actions.
3282 : */
3283 5843 : econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
3284 5843 : econtext->ecxt_innertuple = context->planSlot;
3285 5843 : econtext->ecxt_outertuple = NULL;
3286 :
3287 : /*
3288 : * This routine is only invoked for matched target rows, so we should
3289 : * either have the tupleid of the target row, or an old tuple from the
3290 : * target wholerow junk attr.
3291 : */
3292 : Assert(tupleid != NULL || oldtuple != NULL);
3293 5843 : ItemPointerSetInvalid(&lockedtid);
3294 5843 : if (oldtuple != NULL)
3295 : {
3296 : Assert(!resultRelInfo->ri_needLockTagTuple);
3297 48 : ExecForceStoreHeapTuple(oldtuple, resultRelInfo->ri_oldTupleSlot,
3298 : false);
3299 : }
3300 : else
3301 : {
3302 5795 : if (resultRelInfo->ri_needLockTagTuple)
3303 : {
3304 : /*
3305 : * This locks even for CMD_DELETE, for CMD_NOTHING, and for tuples
3306 : * that don't match mas_whenqual. MERGE on system catalogs is a
3307 : * minor use case, so don't bother optimizing those.
3308 : */
3309 3803 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3310 : InplaceUpdateTupleLock);
3311 3803 : lockedtid = *tupleid;
3312 : }
3313 5795 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
3314 : tupleid,
3315 : SnapshotAny,
3316 : resultRelInfo->ri_oldTupleSlot))
3317 0 : elog(ERROR, "failed to fetch the target tuple");
3318 : }
3319 :
3320 : /*
3321 : * Test the join condition. If it's satisfied, perform a MATCHED action.
3322 : * Otherwise, perform a NOT MATCHED BY SOURCE action.
3323 : *
3324 : * Note that this join condition will be NULL if there are no NOT MATCHED
3325 : * BY SOURCE actions --- see transform_MERGE_to_join(). In that case, we
3326 : * need only consider MATCHED actions here.
3327 : */
3328 5843 : if (ExecQual(resultRelInfo->ri_MergeJoinCondition, econtext))
3329 5750 : actionStates = mergeActions[MERGE_WHEN_MATCHED];
3330 : else
3331 93 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3332 :
3333 5843 : lmerge_matched:
3334 :
3335 10420 : foreach(l, actionStates)
3336 : {
3337 5923 : MergeActionState *relaction = (MergeActionState *) lfirst(l);
3338 5923 : CmdType commandType = relaction->mas_action->commandType;
3339 : TM_Result result;
3340 5923 : UpdateContext updateCxt = {0};
3341 :
3342 : /*
3343 : * Test condition, if any.
3344 : *
3345 : * In the absence of any condition, we perform the action
3346 : * unconditionally (no need to check separately since ExecQual() will
3347 : * return true if there are no conditions to evaluate).
3348 : */
3349 5923 : if (!ExecQual(relaction->mas_whenqual, econtext))
3350 4537 : continue;
3351 :
3352 : /*
3353 : * Check if the existing target tuple meets the USING checks of
3354 : * UPDATE/DELETE RLS policies. If those checks fail, we throw an
3355 : * error.
3356 : *
3357 : * The WITH CHECK quals for UPDATE RLS policies are applied in
3358 : * ExecUpdateAct() and hence we need not do anything special to handle
3359 : * them.
3360 : *
3361 : * NOTE: We must do this after WHEN quals are evaluated, so that we
3362 : * check policies only when they matter.
3363 : */
3364 1386 : if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
3365 : {
3366 57 : ExecWithCheckOptions(commandType == CMD_UPDATE ?
3367 : WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
3368 : resultRelInfo,
3369 : resultRelInfo->ri_oldTupleSlot,
3370 57 : context->mtstate->ps.state);
3371 : }
3372 :
3373 : /* Perform stated action */
3374 1374 : switch (commandType)
3375 : {
3376 1096 : case CMD_UPDATE:
3377 :
3378 : /*
3379 : * Project the output tuple, and use that to update the table.
3380 : * We don't need to filter out junk attributes, because the
3381 : * UPDATE action's targetlist doesn't have any.
3382 : */
3383 1096 : newslot = ExecProject(relaction->mas_proj);
3384 :
3385 1096 : mtstate->mt_merge_action = relaction;
3386 1096 : if (!ExecUpdatePrologue(context, resultRelInfo,
3387 : tupleid, NULL, newslot, &result))
3388 : {
3389 10 : if (result == TM_Ok)
3390 80 : goto out; /* "do nothing" */
3391 :
3392 7 : break; /* concurrent update/delete */
3393 : }
3394 :
3395 : /* INSTEAD OF ROW UPDATE Triggers */
3396 1086 : if (resultRelInfo->ri_TrigDesc &&
3397 174 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
3398 : {
3399 39 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
3400 : oldtuple, newslot))
3401 0 : goto out; /* "do nothing" */
3402 : }
3403 : else
3404 : {
3405 : /* checked ri_needLockTagTuple above */
3406 : Assert(oldtuple == NULL);
3407 :
3408 1047 : result = ExecUpdateAct(context, resultRelInfo, tupleid,
3409 : NULL, newslot, canSetTag,
3410 : &updateCxt);
3411 :
3412 : /*
3413 : * As in ExecUpdate(), if ExecUpdateAct() reports that a
3414 : * cross-partition update was done, then there's nothing
3415 : * else for us to do --- the UPDATE has been turned into a
3416 : * DELETE and an INSERT, and we must not perform any of
3417 : * the usual post-update tasks. Also, the RETURNING tuple
3418 : * (if any) has been projected, so we can just return
3419 : * that.
3420 : */
3421 1035 : if (updateCxt.crossPartUpdate)
3422 : {
3423 69 : mtstate->mt_merge_updated += 1;
3424 69 : rslot = context->cpUpdateReturningSlot;
3425 69 : goto out;
3426 : }
3427 : }
3428 :
3429 1005 : if (result == TM_Ok)
3430 : {
3431 962 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
3432 : tupleid, NULL, newslot);
3433 956 : mtstate->mt_merge_updated += 1;
3434 : }
3435 999 : break;
3436 :
3437 263 : case CMD_DELETE:
3438 263 : mtstate->mt_merge_action = relaction;
3439 263 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
3440 : NULL, NULL, &result))
3441 : {
3442 6 : if (result == TM_Ok)
3443 3 : goto out; /* "do nothing" */
3444 :
3445 3 : break; /* concurrent update/delete */
3446 : }
3447 :
3448 : /* INSTEAD OF ROW DELETE Triggers */
3449 257 : if (resultRelInfo->ri_TrigDesc &&
3450 28 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
3451 : {
3452 3 : if (!ExecIRDeleteTriggers(estate, resultRelInfo,
3453 : oldtuple))
3454 0 : goto out; /* "do nothing" */
3455 : }
3456 : else
3457 : {
3458 : /* checked ri_needLockTagTuple above */
3459 : Assert(oldtuple == NULL);
3460 :
3461 254 : result = ExecDeleteAct(context, resultRelInfo, tupleid,
3462 : false);
3463 : }
3464 :
3465 257 : if (result == TM_Ok)
3466 : {
3467 248 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
3468 : false);
3469 248 : mtstate->mt_merge_deleted += 1;
3470 : }
3471 257 : break;
3472 :
3473 15 : case CMD_NOTHING:
3474 : /* Doing nothing is always OK */
3475 15 : result = TM_Ok;
3476 15 : break;
3477 :
3478 0 : default:
3479 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3480 : }
3481 :
3482 1281 : switch (result)
3483 : {
3484 1219 : case TM_Ok:
3485 : /* all good; perform final actions */
3486 1219 : if (canSetTag && commandType != CMD_NOTHING)
3487 1193 : (estate->es_processed)++;
3488 :
3489 1219 : break;
3490 :
3491 16 : case TM_SelfModified:
3492 :
3493 : /*
3494 : * The target tuple was already updated or deleted by the
3495 : * current command, or by a later command in the current
3496 : * transaction. The former case is explicitly disallowed by
3497 : * the SQL standard for MERGE, which insists that the MERGE
3498 : * join condition should not join a target row to more than
3499 : * one source row.
3500 : *
3501 : * The latter case arises if the tuple is modified by a
3502 : * command in a BEFORE trigger, or perhaps by a command in a
3503 : * volatile function used in the query. In such situations we
3504 : * should not ignore the MERGE action, but it is equally
3505 : * unsafe to proceed. We don't want to discard the original
3506 : * MERGE action while keeping the triggered actions based on
3507 : * it; and it would be no better to allow the original MERGE
3508 : * action while discarding the updates that it triggered. So
3509 : * throwing an error is the only safe course.
3510 : */
3511 16 : if (context->tmfd.cmax != estate->es_output_cid)
3512 6 : ereport(ERROR,
3513 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3514 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3515 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3516 :
3517 10 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3518 10 : ereport(ERROR,
3519 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3520 : /* translator: %s is a SQL command name */
3521 : errmsg("%s command cannot affect row a second time",
3522 : "MERGE"),
3523 : errhint("Ensure that not more than one source row matches any one target row.")));
3524 :
3525 : /* This shouldn't happen */
3526 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3527 : break;
3528 :
3529 5 : case TM_Deleted:
3530 5 : if (IsolationUsesXactSnapshot())
3531 0 : ereport(ERROR,
3532 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3533 : errmsg("could not serialize access due to concurrent delete")));
3534 :
3535 : /*
3536 : * If the tuple was already deleted, set matched to false to
3537 : * let caller handle it under NOT MATCHED [BY TARGET] clauses.
3538 : */
3539 5 : *matched = false;
3540 5 : goto out;
3541 :
3542 41 : case TM_Updated:
3543 : {
3544 : bool was_matched;
3545 : Relation resultRelationDesc;
3546 : TupleTableSlot *epqslot,
3547 : *inputslot;
3548 : LockTupleMode lockmode;
3549 :
3550 : /*
3551 : * The target tuple was concurrently updated by some other
3552 : * transaction. If we are currently processing a MATCHED
3553 : * action, use EvalPlanQual() with the new version of the
3554 : * tuple and recheck the join qual, to detect a change
3555 : * from the MATCHED to the NOT MATCHED cases. If we are
3556 : * already processing a NOT MATCHED BY SOURCE action, we
3557 : * skip this (cannot switch from NOT MATCHED BY SOURCE to
3558 : * MATCHED).
3559 : */
3560 41 : was_matched = relaction->mas_action->matchKind == MERGE_WHEN_MATCHED;
3561 41 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
3562 41 : lockmode = ExecUpdateLockMode(estate, resultRelInfo);
3563 :
3564 41 : if (was_matched)
3565 41 : inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
3566 : resultRelInfo->ri_RangeTableIndex);
3567 : else
3568 0 : inputslot = resultRelInfo->ri_oldTupleSlot;
3569 :
3570 41 : result = table_tuple_lock(resultRelationDesc, tupleid,
3571 : estate->es_snapshot,
3572 : inputslot, estate->es_output_cid,
3573 : lockmode, LockWaitBlock,
3574 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
3575 : &context->tmfd);
3576 41 : switch (result)
3577 : {
3578 40 : case TM_Ok:
3579 :
3580 : /*
3581 : * If the tuple was updated and migrated to
3582 : * another partition concurrently, the current
3583 : * MERGE implementation can't follow. There's
3584 : * probably a better way to handle this case, but
3585 : * it'd require recognizing the relation to which
3586 : * the tuple moved, and setting our current
3587 : * resultRelInfo to that.
3588 : */
3589 40 : if (ItemPointerIndicatesMovedPartitions(tupleid))
3590 0 : ereport(ERROR,
3591 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3592 : errmsg("tuple to be merged was already moved to another partition due to concurrent update")));
3593 :
3594 : /*
3595 : * If this was a MATCHED case, use EvalPlanQual()
3596 : * to recheck the join condition.
3597 : */
3598 40 : if (was_matched)
3599 : {
3600 40 : epqslot = EvalPlanQual(epqstate,
3601 : resultRelationDesc,
3602 : resultRelInfo->ri_RangeTableIndex,
3603 : inputslot);
3604 :
3605 : /*
3606 : * If the subplan didn't return a tuple, then
3607 : * we must be dealing with an inner join for
3608 : * which the join condition no longer matches.
3609 : * This can only happen if there are no NOT
3610 : * MATCHED actions, and so there is nothing
3611 : * more to do.
3612 : */
3613 40 : if (TupIsNull(epqslot))
3614 0 : goto out;
3615 :
3616 : /*
3617 : * If we got a NULL ctid from the subplan, the
3618 : * join quals no longer pass and we switch to
3619 : * the NOT MATCHED BY SOURCE case.
3620 : */
3621 40 : (void) ExecGetJunkAttribute(epqslot,
3622 40 : resultRelInfo->ri_RowIdAttNo,
3623 : &isNull);
3624 40 : if (isNull)
3625 2 : *matched = false;
3626 :
3627 : /*
3628 : * Otherwise, recheck the join quals to see if
3629 : * we need to switch to the NOT MATCHED BY
3630 : * SOURCE case.
3631 : */
3632 40 : if (resultRelInfo->ri_needLockTagTuple)
3633 : {
3634 1 : if (ItemPointerIsValid(&lockedtid))
3635 1 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3636 : InplaceUpdateTupleLock);
3637 1 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3638 : InplaceUpdateTupleLock);
3639 1 : lockedtid = *tupleid;
3640 : }
3641 :
3642 40 : if (!table_tuple_fetch_row_version(resultRelationDesc,
3643 : tupleid,
3644 : SnapshotAny,
3645 : resultRelInfo->ri_oldTupleSlot))
3646 0 : elog(ERROR, "failed to fetch the target tuple");
3647 :
3648 40 : if (*matched)
3649 38 : *matched = ExecQual(resultRelInfo->ri_MergeJoinCondition,
3650 : econtext);
3651 :
3652 : /* Switch lists, if necessary */
3653 40 : if (!*matched)
3654 : {
3655 4 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3656 :
3657 : /*
3658 : * If we have both NOT MATCHED BY SOURCE
3659 : * and NOT MATCHED BY TARGET actions (a
3660 : * full join between the source and target
3661 : * relations), the single previously
3662 : * matched tuple from the outer plan node
3663 : * is treated as two not matched tuples,
3664 : * in the same way as if they had not
3665 : * matched to start with. Therefore, we
3666 : * must adjust the outer plan node's tuple
3667 : * count, if we're instrumenting the
3668 : * query, to get the correct "skipped" row
3669 : * count --- see show_modifytable_info().
3670 : */
3671 4 : if (outerPlanState(mtstate)->instrument &&
3672 1 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] &&
3673 1 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET])
3674 1 : InstrUpdateTupleCount(outerPlanState(mtstate)->instrument, 1.0);
3675 : }
3676 : }
3677 :
3678 : /*
3679 : * Loop back and process the MATCHED or NOT
3680 : * MATCHED BY SOURCE actions from the start.
3681 : */
3682 40 : goto lmerge_matched;
3683 :
3684 0 : case TM_Deleted:
3685 :
3686 : /*
3687 : * tuple already deleted; tell caller to run NOT
3688 : * MATCHED [BY TARGET] actions
3689 : */
3690 0 : *matched = false;
3691 0 : goto out;
3692 :
3693 1 : case TM_SelfModified:
3694 :
3695 : /*
3696 : * This can be reached when following an update
3697 : * chain from a tuple updated by another session,
3698 : * reaching a tuple that was already updated or
3699 : * deleted by the current command, or by a later
3700 : * command in the current transaction. As above,
3701 : * this should always be treated as an error.
3702 : */
3703 1 : if (context->tmfd.cmax != estate->es_output_cid)
3704 0 : ereport(ERROR,
3705 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3706 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3707 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3708 :
3709 1 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3710 1 : ereport(ERROR,
3711 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3712 : /* translator: %s is a SQL command name */
3713 : errmsg("%s command cannot affect row a second time",
3714 : "MERGE"),
3715 : errhint("Ensure that not more than one source row matches any one target row.")));
3716 :
3717 : /* This shouldn't happen */
3718 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3719 : goto out;
3720 :
3721 0 : default:
3722 : /* see table_tuple_lock call in ExecDelete() */
3723 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
3724 : result);
3725 : goto out;
3726 : }
3727 : }
3728 :
3729 0 : case TM_Invisible:
3730 : case TM_WouldBlock:
3731 : case TM_BeingModified:
3732 : /* these should not occur */
3733 0 : elog(ERROR, "unexpected tuple operation result: %d", result);
3734 : break;
3735 : }
3736 :
3737 : /* Process RETURNING if present */
3738 1219 : if (resultRelInfo->ri_projectReturning)
3739 : {
3740 214 : switch (commandType)
3741 : {
3742 94 : case CMD_UPDATE:
3743 94 : rslot = ExecProcessReturning(context,
3744 : resultRelInfo,
3745 : false,
3746 : resultRelInfo->ri_oldTupleSlot,
3747 : newslot,
3748 : context->planSlot);
3749 94 : break;
3750 :
3751 120 : case CMD_DELETE:
3752 120 : rslot = ExecProcessReturning(context,
3753 : resultRelInfo,
3754 : true,
3755 : resultRelInfo->ri_oldTupleSlot,
3756 : NULL,
3757 : context->planSlot);
3758 120 : break;
3759 :
3760 0 : case CMD_NOTHING:
3761 0 : break;
3762 :
3763 0 : default:
3764 0 : elog(ERROR, "unrecognized commandType: %d",
3765 : (int) commandType);
3766 : }
3767 : }
3768 :
3769 : /*
3770 : * We've activated one of the WHEN clauses, so we don't search
3771 : * further. This is required behaviour, not an optimization.
3772 : */
3773 1219 : break;
3774 : }
3775 :
3776 : /*
3777 : * Successfully executed an action or no qualifying action was found.
3778 : */
3779 5796 : out:
3780 5796 : if (ItemPointerIsValid(&lockedtid))
3781 3803 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3782 : InplaceUpdateTupleLock);
3783 5796 : return rslot;
3784 : }
3785 :
3786 : /*
3787 : * Execute the first qualifying NOT MATCHED [BY TARGET] action.
3788 : */
3789 : static TupleTableSlot *
3790 1355 : ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3791 : bool canSetTag)
3792 : {
3793 1355 : ModifyTableState *mtstate = context->mtstate;
3794 1355 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3795 : List *actionStates;
3796 1355 : TupleTableSlot *rslot = NULL;
3797 : ListCell *l;
3798 :
3799 : /*
3800 : * For INSERT actions, the root relation's merge action is OK since the
3801 : * INSERT's targetlist and the WHEN conditions can only refer to the
3802 : * source relation and hence it does not matter which result relation we
3803 : * work with.
3804 : *
3805 : * XXX does this mean that we can avoid creating copies of actionStates on
3806 : * partitioned tables, for not-matched actions?
3807 : */
3808 1355 : actionStates = resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET];
3809 :
3810 : /*
3811 : * Make source tuple available to ExecQual and ExecProject. We don't need
3812 : * the target tuple, since the WHEN quals and targetlist can't refer to
3813 : * the target columns.
3814 : */
3815 1355 : econtext->ecxt_scantuple = NULL;
3816 1355 : econtext->ecxt_innertuple = context->planSlot;
3817 1355 : econtext->ecxt_outertuple = NULL;
3818 :
3819 1790 : foreach(l, actionStates)
3820 : {
3821 1355 : MergeActionState *action = (MergeActionState *) lfirst(l);
3822 1355 : CmdType commandType = action->mas_action->commandType;
3823 : TupleTableSlot *newslot;
3824 :
3825 : /*
3826 : * Test condition, if any.
3827 : *
3828 : * In the absence of any condition, we perform the action
3829 : * unconditionally (no need to check separately since ExecQual() will
3830 : * return true if there are no conditions to evaluate).
3831 : */
3832 1355 : if (!ExecQual(action->mas_whenqual, econtext))
3833 435 : continue;
3834 :
3835 : /* Perform stated action */
3836 920 : switch (commandType)
3837 : {
3838 920 : case CMD_INSERT:
3839 :
3840 : /*
3841 : * Project the tuple. In case of a partitioned table, the
3842 : * projection was already built to use the root's descriptor,
3843 : * so we don't need to map the tuple here.
3844 : */
3845 920 : newslot = ExecProject(action->mas_proj);
3846 920 : mtstate->mt_merge_action = action;
3847 :
3848 920 : rslot = ExecInsert(context, mtstate->rootResultRelInfo,
3849 : newslot, canSetTag, NULL, NULL);
3850 890 : mtstate->mt_merge_inserted += 1;
3851 890 : break;
3852 0 : case CMD_NOTHING:
3853 : /* Do nothing */
3854 0 : break;
3855 0 : default:
3856 0 : elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
3857 : }
3858 :
3859 : /*
3860 : * We've activated one of the WHEN clauses, so we don't search
3861 : * further. This is required behaviour, not an optimization.
3862 : */
3863 890 : break;
3864 : }
3865 :
3866 1325 : return rslot;
3867 : }
3868 :
/*
 * Initialize state for execution of MERGE.
 *
 * Builds a MergeActionState for every WHEN clause of every result relation,
 * compiles the per-relation join conditions, and sets up slots/projections
 * needed by the MATCHED and NOT MATCHED code paths.  Also initializes WITH
 * CHECK OPTION and RETURNING state for the root relation of an inherited
 * target, which ExecInitModifyTable does not cover.
 */
void
ExecInitMerge(ModifyTableState *mtstate, EState *estate)
{
	List	   *mergeActionLists = mtstate->mt_mergeActionLists;
	List	   *mergeJoinConditions = mtstate->mt_mergeJoinConditions;
	ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
	ResultRelInfo *resultRelInfo;
	ExprContext *econtext;
	ListCell   *lc;
	int			i;

	if (mergeActionLists == NIL)
		return;

	mtstate->mt_merge_subcommands = 0;

	if (mtstate->ps.ps_ExprContext == NULL)
		ExecAssignExprContext(estate, &mtstate->ps);
	econtext = mtstate->ps.ps_ExprContext;

	/*
	 * Create a MergeActionState for each action on the mergeActionList and
	 * add it to either a list of matched actions or not-matched actions.
	 *
	 * Similar logic appears in ExecInitPartitionInfo(), so if changing
	 * anything here, do so there too.
	 */
	i = 0;
	foreach(lc, mergeActionLists)
	{
		List	   *mergeActionList = lfirst(lc);
		Node	   *joinCondition;
		TupleDesc	relationDesc;
		ListCell   *l;

		/* i walks mergeJoinConditions and resultRelInfo[] in lockstep */
		joinCondition = (Node *) list_nth(mergeJoinConditions, i);
		resultRelInfo = mtstate->resultRelInfo + i;
		i++;
		relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);

		/* initialize slots for MERGE fetches from this rel */
		if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
			ExecInitMergeTupleSlots(mtstate, resultRelInfo);

		/* initialize state for join condition checking */
		resultRelInfo->ri_MergeJoinCondition =
			ExecInitQual((List *) joinCondition, &mtstate->ps);

		foreach(l, mergeActionList)
		{
			MergeAction *action = (MergeAction *) lfirst(l);
			MergeActionState *action_state;
			TupleTableSlot *tgtslot;
			TupleDesc	tgtdesc;

			/*
			 * Build action merge state for this rel.  (For partitions,
			 * equivalent code exists in ExecInitPartitionInfo.)
			 */
			action_state = makeNode(MergeActionState);
			action_state->mas_action = action;
			action_state->mas_whenqual = ExecInitQual((List *) action->qual,
													  &mtstate->ps);

			/*
			 * We create three lists - one for each MergeMatchKind - and stick
			 * the MergeActionState into the appropriate list.
			 */
			resultRelInfo->ri_MergeActions[action->matchKind] =
				lappend(resultRelInfo->ri_MergeActions[action->matchKind],
						action_state);

			switch (action->commandType)
			{
				case CMD_INSERT:
					/* INSERT actions always use rootRelInfo */
					ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
										action->targetList);

					/*
					 * If the MERGE targets a partitioned table, any INSERT
					 * actions must be routed through it, not the child
					 * relations. Initialize the routing struct and the root
					 * table's "new" tuple slot for that, if not already done.
					 * The projection we prepare, for all relations, uses the
					 * root relation descriptor, and targets the plan's root
					 * slot.  (This is consistent with the fact that we
					 * checked the plan output to match the root relation,
					 * above.)
					 */
					if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
						RELKIND_PARTITIONED_TABLE)
					{
						if (mtstate->mt_partition_tuple_routing == NULL)
						{
							/*
							 * Initialize planstate for routing if not already
							 * done.
							 *
							 * Note that the slot is managed as a standalone
							 * slot belonging to ModifyTableState, so we pass
							 * NULL for the 2nd argument.
							 */
							mtstate->mt_root_tuple_slot =
								table_slot_create(rootRelInfo->ri_RelationDesc,
												  NULL);
							mtstate->mt_partition_tuple_routing =
								ExecSetupPartitionTupleRouting(estate,
															   rootRelInfo->ri_RelationDesc);
						}
						tgtslot = mtstate->mt_root_tuple_slot;
						tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
					}
					else
					{
						/*
						 * If the MERGE targets an inherited table, we insert
						 * into the root table, so we must initialize its
						 * "new" tuple slot, if not already done, and use its
						 * relation descriptor for the projection.
						 *
						 * For non-inherited tables, rootRelInfo and
						 * resultRelInfo are the same, and the "new" tuple
						 * slot will already have been initialized.
						 */
						if (rootRelInfo->ri_newTupleSlot == NULL)
							rootRelInfo->ri_newTupleSlot =
								table_slot_create(rootRelInfo->ri_RelationDesc,
												  &estate->es_tupleTable);

						tgtslot = rootRelInfo->ri_newTupleSlot;
						tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
					}

					action_state->mas_proj =
						ExecBuildProjectionInfo(action->targetList, econtext,
												tgtslot,
												&mtstate->ps,
												tgtdesc);

					mtstate->mt_merge_subcommands |= MERGE_INSERT;
					break;
				case CMD_UPDATE:
					action_state->mas_proj =
						ExecBuildUpdateProjection(action->targetList,
												  true,
												  action->updateColnos,
												  relationDesc,
												  econtext,
												  resultRelInfo->ri_newTupleSlot,
												  &mtstate->ps);
					mtstate->mt_merge_subcommands |= MERGE_UPDATE;
					break;
				case CMD_DELETE:
					/* DELETE needs no projection; row-locating info suffices */
					mtstate->mt_merge_subcommands |= MERGE_DELETE;
					break;
				case CMD_NOTHING:
					break;
				default:
					elog(ERROR, "unknown action in MERGE WHEN clause");
					break;
			}
		}
	}

	/*
	 * If the MERGE targets an inherited table, any INSERT actions will use
	 * rootRelInfo, and rootRelInfo will not be in the resultRelInfo array.
	 * Therefore we must initialize its WITH CHECK OPTION constraints and
	 * RETURNING projection, as ExecInitModifyTable did for the resultRelInfo
	 * entries.
	 *
	 * Note that the planner does not build a withCheckOptionList or
	 * returningList for the root relation, but as in ExecInitPartitionInfo,
	 * we can use the first resultRelInfo entry as a reference to calculate
	 * the attno's for the root table.
	 */
	if (rootRelInfo != mtstate->resultRelInfo &&
		rootRelInfo->ri_RelationDesc->rd_rel->relkind != RELKIND_PARTITIONED_TABLE &&
		(mtstate->mt_merge_subcommands & MERGE_INSERT) != 0)
	{
		ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
		Relation	rootRelation = rootRelInfo->ri_RelationDesc;
		Relation	firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
		int			firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
		AttrMap    *part_attmap = NULL;
		bool		found_whole_row;

		if (node->withCheckOptionLists != NIL)
		{
			List	   *wcoList;
			List	   *wcoExprs = NIL;

			/* There should be as many WCO lists as result rels */
			Assert(list_length(node->withCheckOptionLists) ==
				   list_length(node->resultRelations));

			/*
			 * Use the first WCO list as a reference. In the most common case,
			 * this will be for the same relation as rootRelInfo, and so there
			 * will be no need to adjust its attno's.
			 */
			wcoList = linitial(node->withCheckOptionLists);
			if (rootRelation != firstResultRel)
			{
				/* Convert any Vars in it to contain the root's attno's */
				part_attmap =
					build_attrmap_by_name(RelationGetDescr(rootRelation),
										  RelationGetDescr(firstResultRel),
										  false);

				wcoList = (List *)
					map_variable_attnos((Node *) wcoList,
										firstVarno, 0,
										part_attmap,
										RelationGetForm(rootRelation)->reltype,
										&found_whole_row);
			}

			foreach(lc, wcoList)
			{
				WithCheckOption *wco = lfirst_node(WithCheckOption, lc);
				ExprState  *wcoExpr = ExecInitQual(castNode(List, wco->qual),
												   &mtstate->ps);

				wcoExprs = lappend(wcoExprs, wcoExpr);
			}

			rootRelInfo->ri_WithCheckOptions = wcoList;
			rootRelInfo->ri_WithCheckOptionExprs = wcoExprs;
		}

		if (node->returningLists != NIL)
		{
			List	   *returningList;

			/* There should be as many returning lists as result rels */
			Assert(list_length(node->returningLists) ==
				   list_length(node->resultRelations));

			/*
			 * Use the first returning list as a reference. In the most common
			 * case, this will be for the same relation as rootRelInfo, and so
			 * there will be no need to adjust its attno's.
			 */
			returningList = linitial(node->returningLists);
			if (rootRelation != firstResultRel)
			{
				/* Convert any Vars in it to contain the root's attno's */
				if (part_attmap == NULL)
					part_attmap =
						build_attrmap_by_name(RelationGetDescr(rootRelation),
											  RelationGetDescr(firstResultRel),
											  false);

				returningList = (List *)
					map_variable_attnos((Node *) returningList,
										firstVarno, 0,
										part_attmap,
										RelationGetForm(rootRelation)->reltype,
										&found_whole_row);
			}
			rootRelInfo->ri_returningList = returningList;

			/* Initialize the RETURNING projection */
			rootRelInfo->ri_projectReturning =
				ExecBuildProjectionInfo(returningList, econtext,
										mtstate->ps.ps_ResultTupleSlot,
										&mtstate->ps,
										RelationGetDescr(rootRelation));
		}
	}
}
4145 :
4146 : /*
4147 : * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
4148 : *
4149 : * We mark 'projectNewInfoValid' even though the projections themselves
4150 : * are not initialized here.
4151 : */
4152 : void
4153 947 : ExecInitMergeTupleSlots(ModifyTableState *mtstate,
4154 : ResultRelInfo *resultRelInfo)
4155 : {
4156 947 : EState *estate = mtstate->ps.state;
4157 :
4158 : Assert(!resultRelInfo->ri_projectNewInfoValid);
4159 :
4160 947 : resultRelInfo->ri_oldTupleSlot =
4161 947 : table_slot_create(resultRelInfo->ri_RelationDesc,
4162 : &estate->es_tupleTable);
4163 947 : resultRelInfo->ri_newTupleSlot =
4164 947 : table_slot_create(resultRelInfo->ri_RelationDesc,
4165 : &estate->es_tupleTable);
4166 947 : resultRelInfo->ri_projectNewInfoValid = true;
4167 947 : }
4168 :
4169 : /*
4170 : * Process BEFORE EACH STATEMENT triggers
4171 : */
4172 : static void
4173 59528 : fireBSTriggers(ModifyTableState *node)
4174 : {
4175 59528 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
4176 59528 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
4177 :
4178 59528 : switch (node->operation)
4179 : {
4180 45477 : case CMD_INSERT:
4181 45477 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
4182 45471 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
4183 458 : ExecBSUpdateTriggers(node->ps.state,
4184 : resultRelInfo);
4185 45471 : break;
4186 7220 : case CMD_UPDATE:
4187 7220 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
4188 7220 : break;
4189 6094 : case CMD_DELETE:
4190 6094 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
4191 6094 : break;
4192 737 : case CMD_MERGE:
4193 737 : if (node->mt_merge_subcommands & MERGE_INSERT)
4194 404 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
4195 737 : if (node->mt_merge_subcommands & MERGE_UPDATE)
4196 492 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
4197 737 : if (node->mt_merge_subcommands & MERGE_DELETE)
4198 193 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
4199 737 : break;
4200 0 : default:
4201 0 : elog(ERROR, "unknown operation");
4202 : break;
4203 : }
4204 59522 : }
4205 :
4206 : /*
4207 : * Process AFTER EACH STATEMENT triggers
4208 : */
4209 : static void
4210 57849 : fireASTriggers(ModifyTableState *node)
4211 : {
4212 57849 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
4213 57849 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
4214 :
4215 57849 : switch (node->operation)
4216 : {
4217 44286 : case CMD_INSERT:
4218 44286 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
4219 404 : ExecASUpdateTriggers(node->ps.state,
4220 : resultRelInfo,
4221 404 : node->mt_oc_transition_capture);
4222 44286 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
4223 44286 : node->mt_transition_capture);
4224 44286 : break;
4225 6864 : case CMD_UPDATE:
4226 6864 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
4227 6864 : node->mt_transition_capture);
4228 6864 : break;
4229 6039 : case CMD_DELETE:
4230 6039 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
4231 6039 : node->mt_transition_capture);
4232 6039 : break;
4233 660 : case CMD_MERGE:
4234 660 : if (node->mt_merge_subcommands & MERGE_DELETE)
4235 175 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
4236 175 : node->mt_transition_capture);
4237 660 : if (node->mt_merge_subcommands & MERGE_UPDATE)
4238 442 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
4239 442 : node->mt_transition_capture);
4240 660 : if (node->mt_merge_subcommands & MERGE_INSERT)
4241 370 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
4242 370 : node->mt_transition_capture);
4243 660 : break;
4244 0 : default:
4245 0 : elog(ERROR, "unknown operation");
4246 : break;
4247 : }
4248 57849 : }
4249 :
4250 : /*
4251 : * Set up the state needed for collecting transition tuples for AFTER
4252 : * triggers.
4253 : */
4254 : static void
4255 59710 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
4256 : {
4257 59710 : ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
4258 59710 : ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
4259 :
4260 : /* Check for transition tables on the directly targeted relation. */
4261 59710 : mtstate->mt_transition_capture =
4262 59710 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
4263 59710 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
4264 : mtstate->operation);
4265 59710 : if (plan->operation == CMD_INSERT &&
4266 45481 : plan->onConflictAction == ONCONFLICT_UPDATE)
4267 461 : mtstate->mt_oc_transition_capture =
4268 461 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
4269 461 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
4270 : CMD_UPDATE);
4271 59710 : }
4272 :
4273 : /*
4274 : * ExecPrepareTupleRouting --- prepare for routing one tuple
4275 : *
4276 : * Determine the partition in which the tuple in slot is to be inserted,
4277 : * and return its ResultRelInfo in *partRelInfo. The return value is
4278 : * a slot holding the tuple of the partition rowtype.
4279 : *
4280 : * This also sets the transition table information in mtstate based on the
4281 : * selected partition.
4282 : */
4283 : static TupleTableSlot *
4284 379450 : ExecPrepareTupleRouting(ModifyTableState *mtstate,
4285 : EState *estate,
4286 : PartitionTupleRouting *proute,
4287 : ResultRelInfo *targetRelInfo,
4288 : TupleTableSlot *slot,
4289 : ResultRelInfo **partRelInfo)
4290 : {
4291 : ResultRelInfo *partrel;
4292 : TupleConversionMap *map;
4293 :
4294 : /*
4295 : * Lookup the target partition's ResultRelInfo. If ExecFindPartition does
4296 : * not find a valid partition for the tuple in 'slot' then an error is
4297 : * raised. An error may also be raised if the found partition is not a
4298 : * valid target for INSERTs. This is required since a partitioned table
4299 : * UPDATE to another partition becomes a DELETE+INSERT.
4300 : */
4301 379450 : partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
4302 :
4303 : /*
4304 : * If we're capturing transition tuples, we might need to convert from the
4305 : * partition rowtype to root partitioned table's rowtype. But if there
4306 : * are no BEFORE triggers on the partition that could change the tuple, we
4307 : * can just remember the original unconverted tuple to avoid a needless
4308 : * round trip conversion.
4309 : */
4310 379339 : if (mtstate->mt_transition_capture != NULL)
4311 : {
4312 : bool has_before_insert_row_trig;
4313 :
4314 98 : has_before_insert_row_trig = (partrel->ri_TrigDesc &&
4315 21 : partrel->ri_TrigDesc->trig_insert_before_row);
4316 :
4317 77 : mtstate->mt_transition_capture->tcs_original_insert_tuple =
4318 77 : !has_before_insert_row_trig ? slot : NULL;
4319 : }
4320 :
4321 : /*
4322 : * Convert the tuple, if necessary.
4323 : */
4324 379339 : map = ExecGetRootToChildMap(partrel, estate);
4325 379339 : if (map != NULL)
4326 : {
4327 34294 : TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
4328 :
4329 34294 : slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
4330 : }
4331 :
4332 379339 : *partRelInfo = partrel;
4333 379339 : return slot;
4334 : }
4335 :
4336 : /* ----------------------------------------------------------------
4337 : * ExecModifyTable
4338 : *
4339 : * Perform table modifications as required, and return RETURNING results
4340 : * if needed.
4341 : * ----------------------------------------------------------------
4342 : */
static TupleTableSlot *
ExecModifyTable(PlanState *pstate)
{
	ModifyTableState *node = castNode(ModifyTableState, pstate);
	ModifyTableContext context;
	EState	   *estate = node->ps.state;
	CmdType		operation = node->operation;
	ResultRelInfo *resultRelInfo;
	PlanState  *subplanstate;
	TupleTableSlot *slot;
	TupleTableSlot *oldSlot;
	ItemPointerData tuple_ctid;
	HeapTupleData oldtupdata;
	HeapTuple	oldtuple;
	ItemPointer tupleid;
	bool		tuplock;

	CHECK_FOR_INTERRUPTS();

	/*
	 * This should NOT get called during EvalPlanQual; we should have passed a
	 * subplan tree to EvalPlanQual, instead.  Use a runtime test not just
	 * Assert because this condition is easy to miss in testing.  (Note:
	 * although ModifyTable should not get executed within an EvalPlanQual
	 * operation, we do have to allow it to be initialized and shut down in
	 * case it is within a CTE subplan.  Hence this test must be here, not in
	 * ExecInitModifyTable.)
	 */
	if (estate->es_epq_active != NULL)
		elog(ERROR, "ModifyTable should not be called during EvalPlanQual");

	/*
	 * If we've already completed processing, don't try to do more.  We need
	 * this test because ExecPostprocessPlan might call us an extra time, and
	 * our subplan's nodes aren't necessarily robust against being called
	 * extra times.
	 */
	if (node->mt_done)
		return NULL;

	/*
	 * On first call, fire BEFORE STATEMENT triggers before proceeding.
	 */
	if (node->fireBSTriggers)
	{
		fireBSTriggers(node);
		node->fireBSTriggers = false;
	}

	/* Preload local variables */
	resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
	subplanstate = outerPlanState(node);

	/* Set global context */
	context.mtstate = node;
	context.epqstate = &node->mt_epqstate;
	context.estate = estate;

	/*
	 * Fetch rows from subplan, and execute the required table modification
	 * for each row.  We return from inside the loop whenever a RETURNING
	 * tuple is produced; the loop is resumed on the next call.
	 */
	for (;;)
	{
		/*
		 * Reset the per-output-tuple exprcontext.  This is needed because
		 * triggers expect to use that context as workspace.  It's a bit ugly
		 * to do this below the top level of the plan, however.  We might need
		 * to rethink this later.
		 */
		ResetPerTupleExprContext(estate);

		/*
		 * Reset per-tuple memory context used for processing on conflict and
		 * returning clauses, to free any expression evaluation storage
		 * allocated in the previous cycle.
		 */
		if (pstate->ps_ExprContext)
			ResetExprContext(pstate->ps_ExprContext);

		/*
		 * If there is a pending MERGE ... WHEN NOT MATCHED [BY TARGET] action
		 * to execute, do so now --- see the comments in ExecMerge().
		 */
		if (node->mt_merge_pending_not_matched != NULL)
		{
			context.planSlot = node->mt_merge_pending_not_matched;
			context.cpDeletedSlot = NULL;

			slot = ExecMergeNotMatched(&context, node->resultRelInfo,
									   node->canSetTag);

			/* Clear the pending action */
			node->mt_merge_pending_not_matched = NULL;

			/*
			 * If we got a RETURNING result, return it to the caller.  We'll
			 * continue the work on next call.
			 */
			if (slot)
				return slot;

			continue;			/* continue with the next tuple */
		}

		/* Fetch the next row from subplan */
		context.planSlot = ExecProcNode(subplanstate);
		context.cpDeletedSlot = NULL;

		/* No more tuples to process? */
		if (TupIsNull(context.planSlot))
			break;

		/*
		 * When there are multiple result relations, each tuple contains a
		 * junk column that gives the OID of the rel from which it came.
		 * Extract it and select the correct result relation.
		 */
		if (AttributeNumberIsValid(node->mt_resultOidAttno))
		{
			Datum		datum;
			bool		isNull;
			Oid			resultoid;

			datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
										 &isNull);
			if (isNull)
			{
				/*
				 * For commands other than MERGE, any tuples having InvalidOid
				 * for tableoid are errors.  For MERGE, we may need to handle
				 * them as WHEN NOT MATCHED clauses if any, so do that.
				 *
				 * Note that we use the node's toplevel resultRelInfo, not any
				 * specific partition's.
				 */
				if (operation == CMD_MERGE)
				{
					EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);

					slot = ExecMerge(&context, node->resultRelInfo,
									 NULL, NULL, node->canSetTag);

					/*
					 * If we got a RETURNING result, return it to the caller.
					 * We'll continue the work on next call.
					 */
					if (slot)
						return slot;

					continue;	/* continue with the next tuple */
				}

				elog(ERROR, "tableoid is NULL");
			}
			resultoid = DatumGetObjectId(datum);

			/* If it's not the same as last time, we need to locate the rel */
			if (resultoid != node->mt_lastResultOid)
				resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
														 false, true);
		}

		/*
		 * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
		 * here is compute the RETURNING expressions.
		 */
		if (resultRelInfo->ri_usesFdwDirectModify)
		{
			Assert(resultRelInfo->ri_projectReturning);

			/*
			 * A scan slot containing the data that was actually inserted,
			 * updated or deleted has already been made available to
			 * ExecProcessReturning by IterateDirectModify, so no need to
			 * provide it here.  The individual old and new slots are not
			 * needed, since direct-modify is disabled if the RETURNING list
			 * refers to OLD/NEW values.
			 */
			Assert((resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD) == 0 &&
				   (resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW) == 0);

			slot = ExecProcessReturning(&context, resultRelInfo,
										operation == CMD_DELETE,
										NULL, NULL, context.planSlot);

			return slot;
		}

		EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
		slot = context.planSlot;

		/* Row-locating info is filled in below for UPDATE/DELETE/MERGE. */
		tupleid = NULL;
		oldtuple = NULL;

		/*
		 * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
		 * to be updated/deleted/merged.  For a heap relation, that's a TID;
		 * otherwise we may have a wholerow junk attr that carries the old
		 * tuple in toto.  Keep this in step with the part of
		 * ExecInitModifyTable that sets up ri_RowIdAttNo.
		 */
		if (operation == CMD_UPDATE || operation == CMD_DELETE ||
			operation == CMD_MERGE)
		{
			char		relkind;
			Datum		datum;
			bool		isNull;

			relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
			if (relkind == RELKIND_RELATION ||
				relkind == RELKIND_MATVIEW ||
				relkind == RELKIND_PARTITIONED_TABLE)
			{
				/*
				 * ri_RowIdAttNo refers to a ctid attribute.  See the comment
				 * in ExecInitModifyTable().
				 */
				Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo) ||
					   relkind == RELKIND_PARTITIONED_TABLE);
				datum = ExecGetJunkAttribute(slot,
											 resultRelInfo->ri_RowIdAttNo,
											 &isNull);

				/*
				 * For commands other than MERGE, any tuples having a null row
				 * identifier are errors.  For MERGE, we may need to handle
				 * them as WHEN NOT MATCHED clauses if any, so do that.
				 *
				 * Note that we use the node's toplevel resultRelInfo, not any
				 * specific partition's.
				 */
				if (isNull)
				{
					if (operation == CMD_MERGE)
					{
						EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);

						slot = ExecMerge(&context, node->resultRelInfo,
										 NULL, NULL, node->canSetTag);

						/*
						 * If we got a RETURNING result, return it to the
						 * caller.  We'll continue the work on next call.
						 */
						if (slot)
							return slot;

						continue;	/* continue with the next tuple */
					}

					elog(ERROR, "ctid is NULL");
				}

				/*
				 * Copy the TID into stack storage: the datum points into the
				 * slot, whose contents may be replaced before we're done
				 * using the TID.
				 */
				tupleid = (ItemPointer) DatumGetPointer(datum);
				tuple_ctid = *tupleid;	/* be sure we don't free ctid!! */
				tupleid = &tuple_ctid;
			}

			/*
			 * Use the wholerow attribute, when available, to reconstruct the
			 * old relation tuple.  The old tuple serves one or both of two
			 * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
			 * provides values for any unchanged columns for the NEW tuple of
			 * an UPDATE, because the subplan does not produce all the columns
			 * of the target table.
			 *
			 * Note that the wholerow attribute does not carry system columns,
			 * so foreign table triggers miss seeing those, except that we
			 * know enough here to set t_tableOid.  Quite separately from
			 * this, the FDW may fetch its own junk attrs to identify the row.
			 *
			 * Other relevant relkinds, currently limited to views, always
			 * have a wholerow attribute.
			 */
			else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
			{
				datum = ExecGetJunkAttribute(slot,
											 resultRelInfo->ri_RowIdAttNo,
											 &isNull);

				/*
				 * For commands other than MERGE, any tuples having a null row
				 * identifier are errors.  For MERGE, we may need to handle
				 * them as WHEN NOT MATCHED clauses if any, so do that.
				 *
				 * Note that we use the node's toplevel resultRelInfo, not any
				 * specific partition's.
				 */
				if (isNull)
				{
					if (operation == CMD_MERGE)
					{
						EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);

						slot = ExecMerge(&context, node->resultRelInfo,
										 NULL, NULL, node->canSetTag);

						/*
						 * If we got a RETURNING result, return it to the
						 * caller.  We'll continue the work on next call.
						 */
						if (slot)
							return slot;

						continue;	/* continue with the next tuple */
					}

					elog(ERROR, "wholerow is NULL");
				}

				oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
				oldtupdata.t_len =
					HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
				ItemPointerSetInvalid(&(oldtupdata.t_self));
				/* Historically, view triggers see invalid t_tableOid. */
				oldtupdata.t_tableOid =
					(relkind == RELKIND_VIEW) ? InvalidOid :
					RelationGetRelid(resultRelInfo->ri_RelationDesc);

				oldtuple = &oldtupdata;
			}
			else
			{
				/* Only foreign tables are allowed to omit a row-ID attr */
				Assert(relkind == RELKIND_FOREIGN_TABLE);
			}
		}

		/* Dispatch on the command type to perform the actual modification. */
		switch (operation)
		{
			case CMD_INSERT:
				/* Initialize projection info if first time for this table */
				if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
					ExecInitInsertProjection(node, resultRelInfo);
				slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
				slot = ExecInsert(&context, resultRelInfo, slot,
								  node->canSetTag, NULL, NULL);
				break;

			case CMD_UPDATE:
				tuplock = false;

				/* Initialize projection info if first time for this table */
				if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
					ExecInitUpdateProjection(node, resultRelInfo);

				/*
				 * Make the new tuple by combining plan's output tuple with
				 * the old tuple being updated.
				 */
				oldSlot = resultRelInfo->ri_oldTupleSlot;
				if (oldtuple != NULL)
				{
					Assert(!resultRelInfo->ri_needLockTagTuple);
					/* Use the wholerow junk attr as the old tuple. */
					ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
				}
				else
				{
					/* Fetch the most recent version of old tuple. */
					Relation	relation = resultRelInfo->ri_RelationDesc;

					/*
					 * Take InplaceUpdateTupleLock on the tuple if required;
					 * it is released after ExecUpdate below.
					 */
					if (resultRelInfo->ri_needLockTagTuple)
					{
						LockTuple(relation, tupleid, InplaceUpdateTupleLock);
						tuplock = true;
					}
					if (!table_tuple_fetch_row_version(relation, tupleid,
													   SnapshotAny,
													   oldSlot))
						elog(ERROR, "failed to fetch tuple being updated");
				}
				slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
											 oldSlot);

				/* Now apply the update. */
				slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
								  oldSlot, slot, node->canSetTag);
				if (tuplock)
					UnlockTuple(resultRelInfo->ri_RelationDesc, tupleid,
								InplaceUpdateTupleLock);
				break;

			case CMD_DELETE:
				slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
								  true, false, node->canSetTag, NULL, NULL, NULL);
				break;

			case CMD_MERGE:
				slot = ExecMerge(&context, resultRelInfo, tupleid, oldtuple,
								 node->canSetTag);
				break;

			default:
				elog(ERROR, "unknown operation");
				break;
		}

		/*
		 * If we got a RETURNING result, return it to caller.  We'll continue
		 * the work on next call.
		 */
		if (slot)
			return slot;
	}

	/*
	 * Insert remaining tuples for batch insert.
	 */
	if (estate->es_insert_pending_result_relations != NIL)
		ExecPendingInserts(estate);

	/*
	 * We're done, but fire AFTER STATEMENT triggers before exiting.
	 */
	fireASTriggers(node);

	node->mt_done = true;

	return NULL;
}
4766 : /*
4767 : * ExecLookupResultRelByOid
4768 : * If the table with given OID is among the result relations to be
4769 : * updated by the given ModifyTable node, return its ResultRelInfo.
4770 : *
4771 : * If not found, return NULL if missing_ok, else raise error.
4772 : *
4773 : * If update_cache is true, then upon successful lookup, update the node's
4774 : * one-element cache. ONLY ExecModifyTable may pass true for this.
4775 : */
4776 : ResultRelInfo *
4777 5582 : ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid,
4778 : bool missing_ok, bool update_cache)
4779 : {
4780 5582 : if (node->mt_resultOidHash)
4781 : {
4782 : /* Use the pre-built hash table to locate the rel */
4783 : MTTargetRelLookup *mtlookup;
4784 :
4785 : mtlookup = (MTTargetRelLookup *)
4786 0 : hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
4787 0 : if (mtlookup)
4788 : {
4789 0 : if (update_cache)
4790 : {
4791 0 : node->mt_lastResultOid = resultoid;
4792 0 : node->mt_lastResultIndex = mtlookup->relationIndex;
4793 : }
4794 0 : return node->resultRelInfo + mtlookup->relationIndex;
4795 : }
4796 : }
4797 : else
4798 : {
4799 : /* With few target rels, just search the ResultRelInfo array */
4800 10545 : for (int ndx = 0; ndx < node->mt_nrels; ndx++)
4801 : {
4802 6844 : ResultRelInfo *rInfo = node->resultRelInfo + ndx;
4803 :
4804 6844 : if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
4805 : {
4806 1881 : if (update_cache)
4807 : {
4808 1627 : node->mt_lastResultOid = resultoid;
4809 1627 : node->mt_lastResultIndex = ndx;
4810 : }
4811 1881 : return rInfo;
4812 : }
4813 : }
4814 : }
4815 :
4816 3701 : if (!missing_ok)
4817 0 : elog(ERROR, "incorrect result relation OID %u", resultoid);
4818 3701 : return NULL;
4819 : }
4820 :
4821 : /* ----------------------------------------------------------------
4822 : * ExecInitModifyTable
4823 : * ----------------------------------------------------------------
4824 : */
4825 : ModifyTableState *
4826 60239 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
4827 : {
4828 : ModifyTableState *mtstate;
4829 60239 : Plan *subplan = outerPlan(node);
4830 60239 : CmdType operation = node->operation;
4831 60239 : int total_nrels = list_length(node->resultRelations);
4832 : int nrels;
4833 60239 : List *resultRelations = NIL;
4834 60239 : List *withCheckOptionLists = NIL;
4835 60239 : List *returningLists = NIL;
4836 60239 : List *updateColnosLists = NIL;
4837 60239 : List *mergeActionLists = NIL;
4838 60239 : List *mergeJoinConditions = NIL;
4839 : ResultRelInfo *resultRelInfo;
4840 : List *arowmarks;
4841 : ListCell *l;
4842 : int i;
4843 : Relation rel;
4844 :
4845 : /* check for unsupported flags */
4846 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
4847 :
4848 : /*
4849 : * Only consider unpruned relations for initializing their ResultRelInfo
4850 : * struct and other fields such as withCheckOptions, etc.
4851 : *
4852 : * Note: We must avoid pruning every result relation. This is important
4853 : * for MERGE, since even if every result relation is pruned from the
4854 : * subplan, there might still be NOT MATCHED rows, for which there may be
4855 : * INSERT actions to perform. To allow these actions to be found, at
4856 : * least one result relation must be kept. Also, when inserting into a
4857 : * partitioned table, ExecInitPartitionInfo() needs a ResultRelInfo struct
4858 : * as a reference for building the ResultRelInfo of the target partition.
4859 : * In either case, it doesn't matter which result relation is kept, so we
4860 : * just keep the first one, if all others have been pruned. See also,
4861 : * ExecDoInitialPruning(), which ensures that this first result relation
4862 : * has been locked.
4863 : */
4864 60239 : i = 0;
4865 121744 : foreach(l, node->resultRelations)
4866 : {
4867 61505 : Index rti = lfirst_int(l);
4868 : bool keep_rel;
4869 :
4870 61505 : keep_rel = bms_is_member(rti, estate->es_unpruned_relids);
4871 61505 : if (!keep_rel && i == total_nrels - 1 && resultRelations == NIL)
4872 : {
4873 : /* all result relations pruned; keep the first one */
4874 24 : keep_rel = true;
4875 24 : rti = linitial_int(node->resultRelations);
4876 24 : i = 0;
4877 : }
4878 :
4879 61505 : if (keep_rel)
4880 : {
4881 61462 : resultRelations = lappend_int(resultRelations, rti);
4882 61462 : if (node->withCheckOptionLists)
4883 : {
4884 799 : List *withCheckOptions = list_nth_node(List,
4885 : node->withCheckOptionLists,
4886 : i);
4887 :
4888 799 : withCheckOptionLists = lappend(withCheckOptionLists, withCheckOptions);
4889 : }
4890 61462 : if (node->returningLists)
4891 : {
4892 3431 : List *returningList = list_nth_node(List,
4893 : node->returningLists,
4894 : i);
4895 :
4896 3431 : returningLists = lappend(returningLists, returningList);
4897 : }
4898 61462 : if (node->updateColnosLists)
4899 : {
4900 8436 : List *updateColnosList = list_nth(node->updateColnosLists, i);
4901 :
4902 8436 : updateColnosLists = lappend(updateColnosLists, updateColnosList);
4903 : }
4904 61462 : if (node->mergeActionLists)
4905 : {
4906 941 : List *mergeActionList = list_nth(node->mergeActionLists, i);
4907 :
4908 941 : mergeActionLists = lappend(mergeActionLists, mergeActionList);
4909 : }
4910 61462 : if (node->mergeJoinConditions)
4911 : {
4912 941 : List *mergeJoinCondition = list_nth(node->mergeJoinConditions, i);
4913 :
4914 941 : mergeJoinConditions = lappend(mergeJoinConditions, mergeJoinCondition);
4915 : }
4916 : }
4917 61505 : i++;
4918 : }
4919 60239 : nrels = list_length(resultRelations);
4920 : Assert(nrels > 0);
4921 :
4922 : /*
4923 : * create state structure
4924 : */
4925 60239 : mtstate = makeNode(ModifyTableState);
4926 60239 : mtstate->ps.plan = (Plan *) node;
4927 60239 : mtstate->ps.state = estate;
4928 60239 : mtstate->ps.ExecProcNode = ExecModifyTable;
4929 :
4930 60239 : mtstate->operation = operation;
4931 60239 : mtstate->canSetTag = node->canSetTag;
4932 60239 : mtstate->mt_done = false;
4933 :
4934 60239 : mtstate->mt_nrels = nrels;
4935 60239 : mtstate->resultRelInfo = palloc_array(ResultRelInfo, nrels);
4936 :
4937 60239 : mtstate->mt_merge_pending_not_matched = NULL;
4938 60239 : mtstate->mt_merge_inserted = 0;
4939 60239 : mtstate->mt_merge_updated = 0;
4940 60239 : mtstate->mt_merge_deleted = 0;
4941 60239 : mtstate->mt_updateColnosLists = updateColnosLists;
4942 60239 : mtstate->mt_mergeActionLists = mergeActionLists;
4943 60239 : mtstate->mt_mergeJoinConditions = mergeJoinConditions;
4944 :
4945 : /*----------
4946 : * Resolve the target relation. This is the same as:
4947 : *
4948 : * - the relation for which we will fire FOR STATEMENT triggers,
4949 : * - the relation into whose tuple format all captured transition tuples
4950 : * must be converted, and
4951 : * - the root partitioned table used for tuple routing.
4952 : *
4953 : * If it's a partitioned or inherited table, the root partition or
4954 : * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
4955 : * given explicitly in node->rootRelation. Otherwise, the target relation
4956 : * is the sole relation in the node->resultRelations list and, since it can
4957 : * never be pruned, also in the resultRelations list constructed above.
4958 : *----------
4959 : */
4960 60239 : if (node->rootRelation > 0)
4961 : {
4962 : Assert(bms_is_member(node->rootRelation, estate->es_unpruned_relids));
4963 1473 : mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
4964 1473 : ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
4965 : node->rootRelation);
4966 : }
4967 : else
4968 : {
4969 : Assert(list_length(node->resultRelations) == 1);
4970 : Assert(list_length(resultRelations) == 1);
4971 58766 : mtstate->rootResultRelInfo = mtstate->resultRelInfo;
4972 58766 : ExecInitResultRelation(estate, mtstate->resultRelInfo,
4973 58766 : linitial_int(resultRelations));
4974 : }
4975 :
4976 : /* set up epqstate with dummy subplan data for the moment */
4977 60239 : EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
4978 : node->epqParam, resultRelations);
4979 60239 : mtstate->fireBSTriggers = true;
4980 :
4981 : /*
4982 : * Build state for collecting transition tuples. This requires having a
4983 : * valid trigger query context, so skip it in explain-only mode.
4984 : */
4985 60239 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
4986 59710 : ExecSetupTransitionCaptureState(mtstate, estate);
4987 :
4988 : /*
4989 : * Open all the result relations and initialize the ResultRelInfo structs.
4990 : * (But root relation was initialized above, if it's part of the array.)
4991 : * We must do this before initializing the subplan, because direct-modify
4992 : * FDWs expect their ResultRelInfos to be available.
4993 : */
4994 60239 : resultRelInfo = mtstate->resultRelInfo;
4995 60239 : i = 0;
4996 121530 : foreach(l, resultRelations)
4997 : {
4998 61459 : Index resultRelation = lfirst_int(l);
4999 61459 : List *mergeActions = NIL;
5000 :
5001 61459 : if (mergeActionLists)
5002 941 : mergeActions = list_nth(mergeActionLists, i);
5003 :
5004 61459 : if (resultRelInfo != mtstate->rootResultRelInfo)
5005 : {
5006 2693 : ExecInitResultRelation(estate, resultRelInfo, resultRelation);
5007 :
5008 : /*
5009 : * For child result relations, store the root result relation
5010 : * pointer. We do so for the convenience of places that want to
5011 : * look at the query's original target relation but don't have the
5012 : * mtstate handy.
5013 : */
5014 2693 : resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
5015 : }
5016 :
5017 : /* Initialize the usesFdwDirectModify flag */
5018 61459 : resultRelInfo->ri_usesFdwDirectModify =
5019 61459 : bms_is_member(i, node->fdwDirectModifyPlans);
5020 :
5021 : /*
5022 : * Verify result relation is a valid target for the current operation
5023 : */
5024 61459 : CheckValidResultRel(resultRelInfo, operation, node->onConflictAction,
5025 : mergeActions);
5026 :
5027 61291 : resultRelInfo++;
5028 61291 : i++;
5029 : }
5030 :
5031 : /*
5032 : * Now we may initialize the subplan.
5033 : */
5034 60071 : outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
5035 :
5036 : /*
5037 : * Do additional per-result-relation initialization.
5038 : */
5039 121345 : for (i = 0; i < nrels; i++)
5040 : {
5041 61274 : resultRelInfo = &mtstate->resultRelInfo[i];
5042 :
5043 : /* Let FDWs init themselves for foreign-table result rels */
5044 61274 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5045 61170 : resultRelInfo->ri_FdwRoutine != NULL &&
5046 170 : resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
5047 : {
5048 170 : List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
5049 :
5050 170 : resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
5051 : resultRelInfo,
5052 : fdw_private,
5053 : i,
5054 : eflags);
5055 : }
5056 :
5057 : /*
5058 : * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
5059 : * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
5060 : * tables, the FDW might have created additional junk attr(s), but
5061 : * those are no concern of ours.
5062 : */
5063 61274 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
5064 : operation == CMD_MERGE)
5065 : {
5066 : char relkind;
5067 :
5068 15648 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
5069 15648 : if (relkind == RELKIND_RELATION ||
5070 354 : relkind == RELKIND_MATVIEW ||
5071 : relkind == RELKIND_PARTITIONED_TABLE)
5072 : {
5073 15318 : resultRelInfo->ri_RowIdAttNo =
5074 15318 : ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
5075 :
5076 : /*
5077 : * For heap relations, a ctid junk attribute must be present.
5078 : * Partitioned tables should only appear here when all leaf
5079 : * partitions were pruned, in which case no rows can be
5080 : * produced and ctid is not needed.
5081 : */
5082 15318 : if (relkind == RELKIND_PARTITIONED_TABLE)
5083 : Assert(nrels == 1);
5084 15294 : else if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
5085 0 : elog(ERROR, "could not find junk ctid column");
5086 : }
5087 330 : else if (relkind == RELKIND_FOREIGN_TABLE)
5088 : {
5089 : /*
5090 : * We don't support MERGE with foreign tables for now. (It's
5091 : * problematic because the implementation uses CTID.)
5092 : */
5093 : Assert(operation != CMD_MERGE);
5094 :
5095 : /*
5096 : * When there is a row-level trigger, there should be a
5097 : * wholerow attribute. We also require it to be present in
5098 : * UPDATE and MERGE, so we can get the values of unchanged
5099 : * columns.
5100 : */
5101 186 : resultRelInfo->ri_RowIdAttNo =
5102 186 : ExecFindJunkAttributeInTlist(subplan->targetlist,
5103 : "wholerow");
5104 186 : if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
5105 105 : !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
5106 0 : elog(ERROR, "could not find junk wholerow column");
5107 : }
5108 : else
5109 : {
5110 : /* Other valid target relkinds must provide wholerow */
5111 144 : resultRelInfo->ri_RowIdAttNo =
5112 144 : ExecFindJunkAttributeInTlist(subplan->targetlist,
5113 : "wholerow");
5114 144 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
5115 0 : elog(ERROR, "could not find junk wholerow column");
5116 : }
5117 : }
5118 : }
5119 :
5120 : /*
5121 : * If this is an inherited update/delete/merge, there will be a junk
5122 : * attribute named "tableoid" present in the subplan's targetlist. It
5123 : * will be used to identify the result relation for a given tuple to be
5124 : * updated/deleted/merged.
5125 : */
5126 60071 : mtstate->mt_resultOidAttno =
5127 60071 : ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
5128 : Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || total_nrels == 1);
5129 60071 : mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
5130 60071 : mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
5131 :
5132 : /* Get the root target relation */
5133 60071 : rel = mtstate->rootResultRelInfo->ri_RelationDesc;
5134 :
5135 : /*
5136 : * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
5137 : * or MERGE might need this too, but only if it actually moves tuples
5138 : * between partitions; in that case setup is done by
5139 : * ExecCrossPartitionUpdate.
5140 : */
5141 60071 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
5142 : operation == CMD_INSERT)
5143 2312 : mtstate->mt_partition_tuple_routing =
5144 2312 : ExecSetupPartitionTupleRouting(estate, rel);
5145 :
5146 : /*
5147 : * Initialize any WITH CHECK OPTION constraints if needed.
5148 : */
5149 60071 : resultRelInfo = mtstate->resultRelInfo;
5150 60870 : foreach(l, withCheckOptionLists)
5151 : {
5152 799 : List *wcoList = (List *) lfirst(l);
5153 799 : List *wcoExprs = NIL;
5154 : ListCell *ll;
5155 :
5156 2359 : foreach(ll, wcoList)
5157 : {
5158 1560 : WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
5159 1560 : ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
5160 : &mtstate->ps);
5161 :
5162 1560 : wcoExprs = lappend(wcoExprs, wcoExpr);
5163 : }
5164 :
5165 799 : resultRelInfo->ri_WithCheckOptions = wcoList;
5166 799 : resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
5167 799 : resultRelInfo++;
5168 : }
5169 :
5170 : /*
5171 : * Initialize RETURNING projections if needed.
5172 : */
5173 60071 : if (returningLists)
5174 : {
5175 : TupleTableSlot *slot;
5176 : ExprContext *econtext;
5177 :
5178 : /*
5179 : * Initialize result tuple slot and assign its rowtype using the plan
5180 : * node's declared targetlist, which the planner set up to be the same
5181 : * as the first (before runtime pruning) RETURNING list. We assume
5182 : * all the result rels will produce compatible output.
5183 : */
5184 3256 : ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
5185 3256 : slot = mtstate->ps.ps_ResultTupleSlot;
5186 :
5187 : /* Need an econtext too */
5188 3256 : if (mtstate->ps.ps_ExprContext == NULL)
5189 3256 : ExecAssignExprContext(estate, &mtstate->ps);
5190 3256 : econtext = mtstate->ps.ps_ExprContext;
5191 :
5192 : /*
5193 : * Build a projection for each result rel.
5194 : */
5195 3256 : resultRelInfo = mtstate->resultRelInfo;
5196 6687 : foreach(l, returningLists)
5197 : {
5198 3431 : List *rlist = (List *) lfirst(l);
5199 :
5200 3431 : resultRelInfo->ri_returningList = rlist;
5201 3431 : resultRelInfo->ri_projectReturning =
5202 3431 : ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
5203 3431 : resultRelInfo->ri_RelationDesc->rd_att);
5204 3431 : resultRelInfo++;
5205 : }
5206 : }
5207 : else
5208 : {
5209 : /*
5210 : * We still must construct a dummy result tuple type, because InitPlan
5211 : * expects one (maybe should change that?).
5212 : */
5213 56815 : ExecInitResultTypeTL(&mtstate->ps);
5214 :
5215 56815 : mtstate->ps.ps_ExprContext = NULL;
5216 : }
5217 :
5218 : /* Set the list of arbiter indexes if needed for ON CONFLICT */
5219 60071 : resultRelInfo = mtstate->resultRelInfo;
5220 60071 : if (node->onConflictAction != ONCONFLICT_NONE)
5221 : {
5222 : /* insert may only have one relation, inheritance is not expanded */
5223 : Assert(total_nrels == 1);
5224 905 : resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
5225 : }
5226 :
5227 : /*
5228 : * For ON CONFLICT DO SELECT/UPDATE, initialize the ON CONFLICT action
5229 : * state.
5230 : */
5231 60071 : if (node->onConflictAction == ONCONFLICT_UPDATE ||
5232 59574 : node->onConflictAction == ONCONFLICT_SELECT)
5233 : {
5234 665 : OnConflictActionState *onconfl = makeNode(OnConflictActionState);
5235 :
5236 : /* already exists if created by RETURNING processing above */
5237 665 : if (mtstate->ps.ps_ExprContext == NULL)
5238 350 : ExecAssignExprContext(estate, &mtstate->ps);
5239 :
5240 : /* action state for DO SELECT/UPDATE */
5241 665 : resultRelInfo->ri_onConflict = onconfl;
5242 :
5243 : /* lock strength for DO SELECT [FOR UPDATE/SHARE] */
5244 665 : onconfl->oc_LockStrength = node->onConflictLockStrength;
5245 :
5246 : /* initialize slot for the existing tuple */
5247 665 : onconfl->oc_Existing =
5248 665 : table_slot_create(resultRelInfo->ri_RelationDesc,
5249 665 : &mtstate->ps.state->es_tupleTable);
5250 :
5251 : /*
5252 : * For ON CONFLICT DO UPDATE, initialize target list and projection.
5253 : */
5254 665 : if (node->onConflictAction == ONCONFLICT_UPDATE)
5255 : {
5256 : ExprContext *econtext;
5257 : TupleDesc relationDesc;
5258 :
5259 497 : econtext = mtstate->ps.ps_ExprContext;
5260 497 : relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
5261 :
5262 : /*
5263 : * Create the tuple slot for the UPDATE SET projection. We want a
5264 : * slot of the table's type here, because the slot will be used to
5265 : * insert into the table, and for RETURNING processing - which may
5266 : * access system attributes.
5267 : */
5268 497 : onconfl->oc_ProjSlot =
5269 497 : table_slot_create(resultRelInfo->ri_RelationDesc,
5270 497 : &mtstate->ps.state->es_tupleTable);
5271 :
5272 : /* build UPDATE SET projection state */
5273 497 : onconfl->oc_ProjInfo =
5274 497 : ExecBuildUpdateProjection(node->onConflictSet,
5275 : true,
5276 : node->onConflictCols,
5277 : relationDesc,
5278 : econtext,
5279 : onconfl->oc_ProjSlot,
5280 : &mtstate->ps);
5281 : }
5282 :
5283 : /* initialize state to evaluate the WHERE clause, if any */
5284 665 : if (node->onConflictWhere)
5285 : {
5286 : ExprState *qualexpr;
5287 :
5288 154 : qualexpr = ExecInitQual((List *) node->onConflictWhere,
5289 : &mtstate->ps);
5290 154 : onconfl->oc_WhereClause = qualexpr;
5291 : }
5292 : }
5293 :
5294 : /*
5295 : * If we have any secondary relations in an UPDATE or DELETE, they need to
5296 : * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
5297 : * EvalPlanQual mechanism needs to be told about them. This also goes for
5298 : * the source relations in a MERGE. Locate the relevant ExecRowMarks.
5299 : */
5300 60071 : arowmarks = NIL;
5301 61509 : foreach(l, node->rowMarks)
5302 : {
5303 1438 : PlanRowMark *rc = lfirst_node(PlanRowMark, l);
5304 1438 : RangeTblEntry *rte = exec_rt_fetch(rc->rti, estate);
5305 : ExecRowMark *erm;
5306 : ExecAuxRowMark *aerm;
5307 :
5308 : /* ignore "parent" rowmarks; they are irrelevant at runtime */
5309 1438 : if (rc->isParent)
5310 71 : continue;
5311 :
5312 : /*
5313 : * Also ignore rowmarks belonging to child tables that have been
5314 : * pruned in ExecDoInitialPruning().
5315 : */
5316 1367 : if (rte->rtekind == RTE_RELATION &&
5317 1074 : !bms_is_member(rc->rti, estate->es_unpruned_relids))
5318 0 : continue;
5319 :
5320 : /* Find ExecRowMark and build ExecAuxRowMark */
5321 1367 : erm = ExecFindRowMark(estate, rc->rti, false);
5322 1367 : aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
5323 1367 : arowmarks = lappend(arowmarks, aerm);
5324 : }
5325 :
5326 : /* For a MERGE command, initialize its state */
5327 60071 : if (mtstate->operation == CMD_MERGE)
5328 814 : ExecInitMerge(mtstate, estate);
5329 :
5330 60071 : EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
5331 :
5332 : /*
5333 : * If there are a lot of result relations, use a hash table to speed the
5334 : * lookups. If there are not a lot, a simple linear search is faster.
5335 : *
5336 : * It's not clear where the threshold is, but try 64 for starters. In a
5337 : * debugging build, use a small threshold so that we get some test
5338 : * coverage of both code paths.
5339 : */
5340 : #ifdef USE_ASSERT_CHECKING
5341 : #define MT_NRELS_HASH 4
5342 : #else
5343 : #define MT_NRELS_HASH 64
5344 : #endif
5345 60071 : if (nrels >= MT_NRELS_HASH)
5346 : {
5347 : HASHCTL hash_ctl;
5348 :
5349 0 : hash_ctl.keysize = sizeof(Oid);
5350 0 : hash_ctl.entrysize = sizeof(MTTargetRelLookup);
5351 0 : hash_ctl.hcxt = CurrentMemoryContext;
5352 0 : mtstate->mt_resultOidHash =
5353 0 : hash_create("ModifyTable target hash",
5354 : nrels, &hash_ctl,
5355 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
5356 0 : for (i = 0; i < nrels; i++)
5357 : {
5358 : Oid hashkey;
5359 : MTTargetRelLookup *mtlookup;
5360 : bool found;
5361 :
5362 0 : resultRelInfo = &mtstate->resultRelInfo[i];
5363 0 : hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
5364 : mtlookup = (MTTargetRelLookup *)
5365 0 : hash_search(mtstate->mt_resultOidHash, &hashkey,
5366 : HASH_ENTER, &found);
5367 : Assert(!found);
5368 0 : mtlookup->relationIndex = i;
5369 : }
5370 : }
5371 : else
5372 60071 : mtstate->mt_resultOidHash = NULL;
5373 :
5374 : /*
5375 : * Determine if the FDW supports batch insert and determine the batch size
5376 : * (a FDW may support batching, but it may be disabled for the
5377 : * server/table).
5378 : *
5379 : * We only do this for INSERT, so that for UPDATE/DELETE the batch size
5380 : * remains set to 0.
5381 : */
5382 60071 : if (operation == CMD_INSERT)
5383 : {
5384 : /* insert may only have one relation, inheritance is not expanded */
5385 : Assert(total_nrels == 1);
5386 45626 : resultRelInfo = mtstate->resultRelInfo;
5387 45626 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5388 45626 : resultRelInfo->ri_FdwRoutine != NULL &&
5389 88 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
5390 88 : resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
5391 : {
5392 88 : resultRelInfo->ri_BatchSize =
5393 88 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
5394 88 : Assert(resultRelInfo->ri_BatchSize >= 1);
5395 : }
5396 : else
5397 45538 : resultRelInfo->ri_BatchSize = 1;
5398 : }
5399 :
5400 : /*
5401 : * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
5402 : * to estate->es_auxmodifytables so that it will be run to completion by
5403 : * ExecPostprocessPlan. (It'd actually work fine to add the primary
5404 : * ModifyTable node too, but there's no need.) Note the use of lcons not
5405 : * lappend: we need later-initialized ModifyTable nodes to be shut down
5406 : * before earlier ones. This ensures that we don't throw away RETURNING
5407 : * rows that need to be seen by a later CTE subplan.
5408 : */
5409 60071 : if (!mtstate->canSetTag)
5410 494 : estate->es_auxmodifytables = lcons(mtstate,
5411 : estate->es_auxmodifytables);
5412 :
5413 60071 : return mtstate;
5414 : }
5415 :
5416 : /* ----------------------------------------------------------------
5417 : * ExecEndModifyTable
5418 : *
5419 : * Shuts down the plan.
5420 : *
5421 : * Returns nothing of interest.
5422 : * ----------------------------------------------------------------
5423 : */
5424 : void
5425 57816 : ExecEndModifyTable(ModifyTableState *node)
5426 : {
5427 : int i;
5428 :
5429 : /*
5430 : * Allow any FDWs to shut down. Direct-modify FDW rels are skipped
5431 : * here (ri_usesFdwDirectModify); presumably those are finished via a
5432 : * separate callback elsewhere -- confirm against the FDW docs.
5433 : */
5434 116680 : for (i = 0; i < node->mt_nrels; i++)
5435 : {
5436 : int j;
5437 58864 : ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
5438 :
5439 58864 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5440 58768 : resultRelInfo->ri_FdwRoutine != NULL &&
5441 156 : resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
5442 156 : resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
5443 : resultRelInfo);
5444 :
5445 : /*
5446 : * Cleanup the initialized batch slots. This only matters for FDWs
5447 : * with batching, but the other cases will have ri_NumSlotsInitialized
5448 : * == 0, making this loop a no-op for them.
5449 : */
5450 58892 : for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
5451 : {
5452 28 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
5453 28 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
5454 : }
5455 : }
5456 :
5457 : /*
5458 : * Close all the partitioned tables, leaf partitions, and their indices
5459 : * and release the slot used for tuple routing, if set. (The routing
5460 : * state exists only for partitioned INSERT, or after a cross-partition
5461 : * UPDATE/MERGE set it up at runtime.)
5462 : */
5463 57816 : if (node->mt_partition_tuple_routing)
5464 : {
5465 2337 : ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
5466 :
5467 2337 : if (node->mt_root_tuple_slot)
5468 334 : ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
5469 : }
5470 :
5471 : /*
5472 : * Terminate EPQ execution if active. Note this is done before the
5473 : * subplan is shut down, matching the original teardown order.
5474 : */
5475 57816 : EvalPlanQualEnd(&node->mt_epqstate);
5476 :
5477 : /*
5478 : * shut down subplan (the source of tuples to insert/update/delete/merge)
5479 : */
5480 57816 : ExecEndNode(outerPlanState(node));
5481 57816 : }
5477 :
5478 : void
5479 0 : ExecReScanModifyTable(ModifyTableState *node)
5480 : {
5481 : /*
5482 : * Currently, we don't need to support rescan on ModifyTable nodes. The
5483 : * semantics of that would be a bit debatable anyway. Unconditionally
5484 : * reject the request; elog(ERROR) does not return.
5485 : */
5486 0 : elog(ERROR, "ExecReScanModifyTable is not implemented");
5487 : }
|