Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeModifyTable.c
4 : * routines to handle ModifyTable nodes.
5 : *
6 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/executor/nodeModifyTable.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : /* INTERFACE ROUTINES
16 : * ExecInitModifyTable - initialize the ModifyTable node
17 : * ExecModifyTable - retrieve the next tuple from the node
18 : * ExecEndModifyTable - shut down the ModifyTable node
19 : * ExecReScanModifyTable - rescan the ModifyTable node
20 : *
21 : * NOTES
22 : * The ModifyTable node receives input from its outerPlan, which is
23 : * the data to insert for INSERT cases, the changed columns' new
24 : * values plus row-locating info for UPDATE and MERGE cases, or just the
25 : * row-locating info for DELETE cases.
26 : *
27 : * The relation to modify can be an ordinary table, a foreign table, or a
28 : * view. If it's a view, either it has sufficient INSTEAD OF triggers or
29 : * this node executes only MERGE ... DO NOTHING. If the original MERGE
30 : * targeted a view not in one of those two categories, earlier processing
31 : * already pointed the ModifyTable result relation to an underlying
32 : * relation of that other view. This node does process
33 : * ri_WithCheckOptions, which may have expressions from those other,
34 : * automatically updatable views.
35 : *
36 : * MERGE runs a join between the source relation and the target table.
37 : * If any WHEN NOT MATCHED [BY TARGET] clauses are present, then the join
38 : * is an outer join that might output tuples without a matching target
39 : * tuple. In this case, any unmatched target tuples will have NULL
40 : * row-locating info, and only INSERT can be run. But for matched target
41 : * tuples, the row-locating info is used to determine the tuple to UPDATE
42 : * or DELETE. When all clauses are WHEN MATCHED or WHEN NOT MATCHED BY
43 : * SOURCE, all tuples produced by the join will include a matching target
44 : * tuple, so all tuples contain row-locating info.
45 : *
46 : * If the query specifies RETURNING, then the ModifyTable returns a
47 : * RETURNING tuple after completing each row insert, update, or delete.
48 : * It must be called again to continue the operation. Without RETURNING,
49 : * we just loop within the node until all the work is done, then
50 : * return NULL. This avoids useless call/return overhead.
51 : */
52 :
53 : #include "postgres.h"
54 :
55 : #include "access/htup_details.h"
56 : #include "access/tableam.h"
57 : #include "access/tupconvert.h"
58 : #include "access/xact.h"
59 : #include "commands/trigger.h"
60 : #include "executor/execPartition.h"
61 : #include "executor/executor.h"
62 : #include "executor/instrument.h"
63 : #include "executor/nodeModifyTable.h"
64 : #include "foreign/fdwapi.h"
65 : #include "miscadmin.h"
66 : #include "nodes/nodeFuncs.h"
67 : #include "optimizer/optimizer.h"
68 : #include "rewrite/rewriteHandler.h"
69 : #include "rewrite/rewriteManip.h"
70 : #include "storage/lmgr.h"
71 : #include "utils/builtins.h"
72 : #include "utils/datum.h"
73 : #include "utils/injection_point.h"
74 : #include "utils/rel.h"
75 : #include "utils/snapmgr.h"
76 :
77 :
/*
 * Hash table entry used to map a target relation's OID to its position in
 * the ModifyTableState's resultRelInfo[] array, allowing fast lookup of the
 * ResultRelInfo for a given relation OID.
 */
typedef struct MTTargetRelLookup
{
	Oid			relationOid;	/* hash key, must be first */
	int			relationIndex;	/* rel's index in resultRelInfo[] array */
} MTTargetRelLookup;
83 :
/*
 * Context struct for a ModifyTable operation, containing basic execution
 * state and some output variables populated by ExecUpdateAct() and
 * ExecDeleteAct() to report the result of their actions to callers.
 */
typedef struct ModifyTableContext
{
	/* Operation state */
	ModifyTableState *mtstate;
	EPQState   *epqstate;
	EState	   *estate;

	/*
	 * Slot containing tuple obtained from ModifyTable's subplan.  Used to
	 * access "junk" columns that are not going to be stored.
	 */
	TupleTableSlot *planSlot;

	/*
	 * Information about the changes that were made concurrently to a tuple
	 * being updated or deleted
	 */
	TM_FailureData tmfd;

	/*
	 * The tuple deleted when doing a cross-partition UPDATE with a RETURNING
	 * clause that refers to OLD columns (converted to the root's tuple
	 * descriptor).
	 */
	TupleTableSlot *cpDeletedSlot;

	/*
	 * The tuple projected by the INSERT's RETURNING clause, when doing a
	 * cross-partition UPDATE
	 */
	TupleTableSlot *cpUpdateReturningSlot;
} ModifyTableContext;
121 :
/*
 * Context struct containing output data specific to UPDATE operations.
 * Filled in by ExecUpdateAct() to tell its caller what happened.
 */
typedef struct UpdateContext
{
	bool		crossPartUpdate;	/* was it a cross-partition update? */
	TU_UpdateIndexes updateIndexes; /* Which index updates are required? */

	/*
	 * Lock mode to acquire on the latest tuple version before performing
	 * EvalPlanQual on it
	 */
	LockTupleMode lockmode;
} UpdateContext;
136 :
137 :
138 : static void ExecBatchInsert(ModifyTableState *mtstate,
139 : ResultRelInfo *resultRelInfo,
140 : TupleTableSlot **slots,
141 : TupleTableSlot **planSlots,
142 : int numSlots,
143 : EState *estate,
144 : bool canSetTag);
145 : static void ExecPendingInserts(EState *estate);
146 : static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
147 : ResultRelInfo *sourcePartInfo,
148 : ResultRelInfo *destPartInfo,
149 : ItemPointer tupleid,
150 : TupleTableSlot *oldslot,
151 : TupleTableSlot *newslot);
152 : static bool ExecOnConflictLockRow(ModifyTableContext *context,
153 : TupleTableSlot *existing,
154 : ItemPointer conflictTid,
155 : Relation relation,
156 : LockTupleMode lockmode,
157 : bool isUpdate);
158 : static bool ExecOnConflictUpdate(ModifyTableContext *context,
159 : ResultRelInfo *resultRelInfo,
160 : ItemPointer conflictTid,
161 : TupleTableSlot *excludedSlot,
162 : bool canSetTag,
163 : TupleTableSlot **returning);
164 : static bool ExecOnConflictSelect(ModifyTableContext *context,
165 : ResultRelInfo *resultRelInfo,
166 : ItemPointer conflictTid,
167 : TupleTableSlot *excludedSlot,
168 : bool canSetTag,
169 : TupleTableSlot **returning);
170 : static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
171 : EState *estate,
172 : PartitionTupleRouting *proute,
173 : ResultRelInfo *targetRelInfo,
174 : TupleTableSlot *slot,
175 : ResultRelInfo **partRelInfo);
176 :
177 : static TupleTableSlot *ExecMerge(ModifyTableContext *context,
178 : ResultRelInfo *resultRelInfo,
179 : ItemPointer tupleid,
180 : HeapTuple oldtuple,
181 : bool canSetTag);
182 : static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
183 : static TupleTableSlot *ExecMergeMatched(ModifyTableContext *context,
184 : ResultRelInfo *resultRelInfo,
185 : ItemPointer tupleid,
186 : HeapTuple oldtuple,
187 : bool canSetTag,
188 : bool *matched);
189 : static TupleTableSlot *ExecMergeNotMatched(ModifyTableContext *context,
190 : ResultRelInfo *resultRelInfo,
191 : bool canSetTag);
192 :
193 :
194 : /*
195 : * Verify that the tuples to be produced by INSERT match the
196 : * target relation's rowtype
197 : *
198 : * We do this to guard against stale plans. If plan invalidation is
199 : * functioning properly then we should never get a failure here, but better
200 : * safe than sorry. Note that this is called after we have obtained lock
201 : * on the target rel, so the rowtype can't change underneath us.
202 : *
203 : * The plan output is represented by its targetlist, because that makes
204 : * handling the dropped-column case easier.
205 : *
206 : * We used to use this for UPDATE as well, but now the equivalent checks
207 : * are done in ExecBuildUpdateProjection.
208 : */
209 : static void
210 55470 : ExecCheckPlanOutput(Relation resultRel, List *targetList)
211 : {
212 55470 : TupleDesc resultDesc = RelationGetDescr(resultRel);
213 55470 : int attno = 0;
214 : ListCell *lc;
215 :
216 173497 : foreach(lc, targetList)
217 : {
218 118027 : TargetEntry *tle = (TargetEntry *) lfirst(lc);
219 : Form_pg_attribute attr;
220 :
221 : Assert(!tle->resjunk); /* caller removed junk items already */
222 :
223 118027 : if (attno >= resultDesc->natts)
224 0 : ereport(ERROR,
225 : (errcode(ERRCODE_DATATYPE_MISMATCH),
226 : errmsg("table row type and query-specified row type do not match"),
227 : errdetail("Query has too many columns.")));
228 118027 : attr = TupleDescAttr(resultDesc, attno);
229 118027 : attno++;
230 :
231 : /*
232 : * Special cases here should match planner's expand_insert_targetlist.
233 : */
234 118027 : if (attr->attisdropped)
235 : {
236 : /*
237 : * For a dropped column, we can't check atttypid (it's likely 0).
238 : * In any case the planner has most likely inserted an INT4 null.
239 : * What we insist on is just *some* NULL constant.
240 : */
241 443 : if (!IsA(tle->expr, Const) ||
242 443 : !((Const *) tle->expr)->constisnull)
243 0 : ereport(ERROR,
244 : (errcode(ERRCODE_DATATYPE_MISMATCH),
245 : errmsg("table row type and query-specified row type do not match"),
246 : errdetail("Query provides a value for a dropped column at ordinal position %d.",
247 : attno)));
248 : }
249 117584 : else if (attr->attgenerated)
250 : {
251 : /*
252 : * For a generated column, the planner will have inserted a null
253 : * of the column's base type (to avoid possibly failing on domain
254 : * not-null constraints). It doesn't seem worth insisting on that
255 : * exact type though, since a null value is type-independent. As
256 : * above, just insist on *some* NULL constant.
257 : */
258 764 : if (!IsA(tle->expr, Const) ||
259 764 : !((Const *) tle->expr)->constisnull)
260 0 : ereport(ERROR,
261 : (errcode(ERRCODE_DATATYPE_MISMATCH),
262 : errmsg("table row type and query-specified row type do not match"),
263 : errdetail("Query provides a value for a generated column at ordinal position %d.",
264 : attno)));
265 : }
266 : else
267 : {
268 : /* Normal case: demand type match */
269 116820 : if (exprType((Node *) tle->expr) != attr->atttypid)
270 0 : ereport(ERROR,
271 : (errcode(ERRCODE_DATATYPE_MISMATCH),
272 : errmsg("table row type and query-specified row type do not match"),
273 : errdetail("Table has type %s at ordinal position %d, but query expects %s.",
274 : format_type_be(attr->atttypid),
275 : attno,
276 : format_type_be(exprType((Node *) tle->expr)))));
277 : }
278 : }
279 55470 : if (attno != resultDesc->natts)
280 0 : ereport(ERROR,
281 : (errcode(ERRCODE_DATATYPE_MISMATCH),
282 : errmsg("table row type and query-specified row type do not match"),
283 : errdetail("Query has too few columns.")));
284 55470 : }
285 :
286 : /*
287 : * ExecProcessReturning --- evaluate a RETURNING list
288 : *
289 : * context: context for the ModifyTable operation
290 : * resultRelInfo: current result rel
291 : * isDelete: true if the operation/merge action is a DELETE
292 : * oldSlot: slot holding old tuple deleted or updated
293 : * newSlot: slot holding new tuple inserted or updated
294 : * planSlot: slot holding tuple returned by top subplan node
295 : *
296 : * Note: If oldSlot and newSlot are NULL, the FDW should have already provided
297 : * econtext's scan tuple and its old & new tuples are not needed (FDW direct-
298 : * modify is disabled if the RETURNING list refers to any OLD/NEW values).
299 : *
300 : * Note: For the SELECT path of INSERT ... ON CONFLICT DO SELECT, oldSlot and
301 : * newSlot are both the existing tuple, since it's not changed.
302 : *
303 : * Returns a slot holding the result tuple
304 : */
305 : static TupleTableSlot *
306 5544 : ExecProcessReturning(ModifyTableContext *context,
307 : ResultRelInfo *resultRelInfo,
308 : bool isDelete,
309 : TupleTableSlot *oldSlot,
310 : TupleTableSlot *newSlot,
311 : TupleTableSlot *planSlot)
312 : {
313 5544 : EState *estate = context->estate;
314 5544 : ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
315 5544 : ExprContext *econtext = projectReturning->pi_exprContext;
316 :
317 : /* Make tuple and any needed join variables available to ExecProject */
318 5544 : if (isDelete)
319 : {
320 : /* return old tuple by default */
321 865 : if (oldSlot)
322 746 : econtext->ecxt_scantuple = oldSlot;
323 : }
324 : else
325 : {
326 : /* return new tuple by default */
327 4679 : if (newSlot)
328 4451 : econtext->ecxt_scantuple = newSlot;
329 : }
330 5544 : econtext->ecxt_outertuple = planSlot;
331 :
332 : /* Make old/new tuples available to ExecProject, if required */
333 5544 : if (oldSlot)
334 2507 : econtext->ecxt_oldtuple = oldSlot;
335 3037 : else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
336 124 : econtext->ecxt_oldtuple = ExecGetAllNullSlot(estate, resultRelInfo);
337 : else
338 2913 : econtext->ecxt_oldtuple = NULL; /* No references to OLD columns */
339 :
340 5544 : if (newSlot)
341 4451 : econtext->ecxt_newtuple = newSlot;
342 1093 : else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW)
343 88 : econtext->ecxt_newtuple = ExecGetAllNullSlot(estate, resultRelInfo);
344 : else
345 1005 : econtext->ecxt_newtuple = NULL; /* No references to NEW columns */
346 :
347 : /*
348 : * Tell ExecProject whether or not the OLD/NEW rows actually exist. This
349 : * information is required to evaluate ReturningExpr nodes and also in
350 : * ExecEvalSysVar() and ExecEvalWholeRowVar().
351 : */
352 5544 : if (oldSlot == NULL)
353 3037 : projectReturning->pi_state.flags |= EEO_FLAG_OLD_IS_NULL;
354 : else
355 2507 : projectReturning->pi_state.flags &= ~EEO_FLAG_OLD_IS_NULL;
356 :
357 5544 : if (newSlot == NULL)
358 1093 : projectReturning->pi_state.flags |= EEO_FLAG_NEW_IS_NULL;
359 : else
360 4451 : projectReturning->pi_state.flags &= ~EEO_FLAG_NEW_IS_NULL;
361 :
362 : /* Compute the RETURNING expressions */
363 5544 : return ExecProject(projectReturning);
364 : }
365 :
366 : /*
367 : * ExecCheckTupleVisible -- verify tuple is visible
368 : *
369 : * It would not be consistent with guarantees of the higher isolation levels to
370 : * proceed with avoiding insertion (taking speculative insertion's alternative
371 : * path) on the basis of another tuple that is not visible to MVCC snapshot.
372 : * Check for the need to raise a serialization failure, and do so as necessary.
373 : */
374 : static void
375 2936 : ExecCheckTupleVisible(EState *estate,
376 : Relation rel,
377 : TupleTableSlot *slot)
378 : {
379 2936 : if (!IsolationUsesXactSnapshot())
380 2894 : return;
381 :
382 42 : if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
383 : {
384 : Datum xminDatum;
385 : TransactionId xmin;
386 : bool isnull;
387 :
388 30 : xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
389 : Assert(!isnull);
390 30 : xmin = DatumGetTransactionId(xminDatum);
391 :
392 : /*
393 : * We should not raise a serialization failure if the conflict is
394 : * against a tuple inserted by our own transaction, even if it's not
395 : * visible to our snapshot. (This would happen, for example, if
396 : * conflicting keys are proposed for insertion in a single command.)
397 : */
398 30 : if (!TransactionIdIsCurrentTransactionId(xmin))
399 10 : ereport(ERROR,
400 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
401 : errmsg("could not serialize access due to concurrent update")));
402 : }
403 : }
404 :
405 : /*
406 : * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
407 : */
408 : static void
409 139 : ExecCheckTIDVisible(EState *estate,
410 : ResultRelInfo *relinfo,
411 : ItemPointer tid,
412 : TupleTableSlot *tempSlot)
413 : {
414 139 : Relation rel = relinfo->ri_RelationDesc;
415 :
416 : /* Redundantly check isolation level */
417 139 : if (!IsolationUsesXactSnapshot())
418 105 : return;
419 :
420 34 : if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
421 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
422 34 : ExecCheckTupleVisible(estate, rel, tempSlot);
423 24 : ExecClearTuple(tempSlot);
424 : }
425 :
426 : /*
427 : * Initialize generated columns handling for a tuple
428 : *
429 : * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI or
430 : * ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
431 : * This is used only for stored generated columns.
432 : *
433 : * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
434 : * This is used by both stored and virtual generated columns.
435 : *
436 : * Note: usually, a given query would need only one of ri_GeneratedExprsI and
437 : * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
438 : * cross-partition UPDATEs, since a partition might be the target of both
439 : * UPDATE and INSERT actions.
440 : */
void
ExecInitGenerated(ResultRelInfo *resultRelInfo,
				  EState *estate,
				  CmdType cmdtype)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	int			natts = tupdesc->natts;
	ExprState **ri_GeneratedExprs;
	int			ri_NumGeneratedNeeded;
	Bitmapset  *updatedCols;
	MemoryContext oldContext;

	/* Nothing to do if no generated columns */
	if (!(tupdesc->constr && (tupdesc->constr->has_generated_stored || tupdesc->constr->has_generated_virtual)))
		return;

	/*
	 * In an UPDATE, we can skip computing any generated columns that do not
	 * depend on any UPDATE target column.  But if there is a BEFORE ROW
	 * UPDATE trigger, we cannot skip because the trigger might change more
	 * columns.
	 */
	if (cmdtype == CMD_UPDATE &&
		!(rel->trigdesc && rel->trigdesc->trig_update_before_row))
		updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
	else
		updatedCols = NULL;

	/*
	 * Make sure these data structures are built in the per-query memory
	 * context so they'll survive throughout the query.
	 */
	oldContext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* One (possibly NULL) ExprState pointer per attribute of the relation */
	ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
	ri_NumGeneratedNeeded = 0;

	for (int i = 0; i < natts; i++)
	{
		char		attgenerated = TupleDescAttr(tupdesc, i)->attgenerated;

		if (attgenerated)
		{
			Expr	   *expr;

			/* Fetch the GENERATED AS expression tree */
			expr = (Expr *) build_column_default(rel, i + 1);
			if (expr == NULL)
				elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
					 i + 1, RelationGetRelationName(rel));

			/*
			 * If it's an update with a known set of update target columns,
			 * see if we can skip the computation.
			 */
			if (updatedCols)
			{
				Bitmapset  *attrs_used = NULL;

				pull_varattnos((Node *) expr, 1, &attrs_used);

				if (!bms_overlap(updatedCols, attrs_used))
					continue;	/* need not update this column */
			}

			/*
			 * No luck, so prepare the expression for execution.  Only
			 * stored generated columns need a compiled expression here;
			 * virtual ones are computed on read.
			 */
			if (attgenerated == ATTRIBUTE_GENERATED_STORED)
			{
				ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
				ri_NumGeneratedNeeded++;
			}

			/* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
			if (cmdtype == CMD_UPDATE)
				resultRelInfo->ri_extraUpdatedCols =
					bms_add_member(resultRelInfo->ri_extraUpdatedCols,
								   i + 1 - FirstLowInvalidHeapAttributeNumber);
		}
	}

	if (ri_NumGeneratedNeeded == 0)
	{
		/* didn't need it after all */
		pfree(ri_GeneratedExprs);
		ri_GeneratedExprs = NULL;
	}

	/*
	 * Save in appropriate set of fields: the ...U fields for UPDATE, the
	 * ...I fields otherwise (INSERT, and the insert half of MERGE or a
	 * cross-partition UPDATE).
	 */
	if (cmdtype == CMD_UPDATE)
	{
		/* Don't call twice */
		Assert(resultRelInfo->ri_GeneratedExprsU == NULL);

		resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
		resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;

		resultRelInfo->ri_extraUpdatedCols_valid = true;
	}
	else
	{
		/* Don't call twice */
		Assert(resultRelInfo->ri_GeneratedExprsI == NULL);

		resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
		resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
	}

	MemoryContextSwitchTo(oldContext);
}
551 :
552 : /*
553 : * Compute stored generated columns for a tuple
554 : */
void
ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
						   EState *estate, TupleTableSlot *slot,
						   CmdType cmdtype)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	int			natts = tupdesc->natts;
	ExprContext *econtext = GetPerTupleExprContext(estate);
	ExprState **ri_GeneratedExprs;
	MemoryContext oldContext;
	Datum	   *values;
	bool	   *nulls;

	/* We should not be called unless this is true */
	Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);

	/*
	 * Initialize the expressions if we didn't already, and check whether we
	 * can exit early because nothing needs to be computed.
	 */
	if (cmdtype == CMD_UPDATE)
	{
		if (resultRelInfo->ri_GeneratedExprsU == NULL)
			ExecInitGenerated(resultRelInfo, estate, cmdtype);
		if (resultRelInfo->ri_NumGeneratedNeededU == 0)
			return;
		ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
	}
	else
	{
		if (resultRelInfo->ri_GeneratedExprsI == NULL)
			ExecInitGenerated(resultRelInfo, estate, cmdtype);
		/* Early exit is impossible given the prior Assert */
		Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
		ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
	}

	/* Work in the per-tuple context so temporary results are reclaimed */
	oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));

	values = palloc_array(Datum, natts);
	nulls = palloc_array(bool, natts);

	slot_getallattrs(slot);
	memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);

	for (int i = 0; i < natts; i++)
	{
		CompactAttribute *attr = TupleDescCompactAttr(tupdesc, i);

		if (ri_GeneratedExprs[i])
		{
			Datum		val;
			bool		isnull;

			Assert(TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED);

			econtext->ecxt_scantuple = slot;

			val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);

			/*
			 * We must make a copy of val as we have no guarantees about where
			 * memory for a pass-by-reference Datum is located.
			 */
			if (!isnull)
				val = datumCopy(val, attr->attbyval, attr->attlen);

			values[i] = val;
			nulls[i] = isnull;
		}
		else
		{
			/* Non-generated column: copy the existing value, if not null */
			if (!nulls[i])
				values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
		}
	}

	/* Replace the slot's contents with the completed column arrays */
	ExecClearTuple(slot);
	memcpy(slot->tts_values, values, sizeof(*values) * natts);
	memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
	ExecStoreVirtualTuple(slot);
	ExecMaterializeSlot(slot);

	MemoryContextSwitchTo(oldContext);
}
641 :
642 : /*
643 : * ExecInitInsertProjection
644 : * Do one-time initialization of projection data for INSERT tuples.
645 : *
646 : * INSERT queries may need a projection to filter out junk attrs in the tlist.
647 : *
648 : * This is also a convenient place to verify that the
649 : * output of an INSERT matches the target table.
650 : */
651 : static void
652 54770 : ExecInitInsertProjection(ModifyTableState *mtstate,
653 : ResultRelInfo *resultRelInfo)
654 : {
655 54770 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
656 54770 : Plan *subplan = outerPlan(node);
657 54770 : EState *estate = mtstate->ps.state;
658 54770 : List *insertTargetList = NIL;
659 54770 : bool need_projection = false;
660 : ListCell *l;
661 :
662 : /* Extract non-junk columns of the subplan's result tlist. */
663 170968 : foreach(l, subplan->targetlist)
664 : {
665 116198 : TargetEntry *tle = (TargetEntry *) lfirst(l);
666 :
667 116198 : if (!tle->resjunk)
668 116198 : insertTargetList = lappend(insertTargetList, tle);
669 : else
670 0 : need_projection = true;
671 : }
672 :
673 : /*
674 : * The junk-free list must produce a tuple suitable for the result
675 : * relation.
676 : */
677 54770 : ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);
678 :
679 : /* We'll need a slot matching the table's format. */
680 54770 : resultRelInfo->ri_newTupleSlot =
681 54770 : table_slot_create(resultRelInfo->ri_RelationDesc,
682 : &estate->es_tupleTable);
683 :
684 : /* Build ProjectionInfo if needed (it probably isn't). */
685 54770 : if (need_projection)
686 : {
687 0 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
688 :
689 : /* need an expression context to do the projection */
690 0 : if (mtstate->ps.ps_ExprContext == NULL)
691 0 : ExecAssignExprContext(estate, &mtstate->ps);
692 :
693 0 : resultRelInfo->ri_projectNew =
694 0 : ExecBuildProjectionInfo(insertTargetList,
695 : mtstate->ps.ps_ExprContext,
696 : resultRelInfo->ri_newTupleSlot,
697 : &mtstate->ps,
698 : relDesc);
699 : }
700 :
701 54770 : resultRelInfo->ri_projectNewInfoValid = true;
702 54770 : }
703 :
704 : /*
705 : * ExecInitUpdateProjection
706 : * Do one-time initialization of projection data for UPDATE tuples.
707 : *
708 : * UPDATE always needs a projection, because (1) there's always some junk
709 : * attrs, and (2) we may need to merge values of not-updated columns from
710 : * the old tuple into the final tuple. In UPDATE, the tuple arriving from
711 : * the subplan contains only new values for the changed columns, plus row
712 : * identity info in the junk attrs.
713 : *
714 : * This is "one-time" for any given result rel, but we might touch more than
715 : * one result rel in the course of an inherited UPDATE, and each one needs
716 : * its own projection due to possible column order variation.
717 : *
718 : * This is also a convenient place to verify that the output of an UPDATE
719 : * matches the target table (ExecBuildUpdateProjection does that).
720 : */
721 : static void
722 8204 : ExecInitUpdateProjection(ModifyTableState *mtstate,
723 : ResultRelInfo *resultRelInfo)
724 : {
725 8204 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
726 8204 : Plan *subplan = outerPlan(node);
727 8204 : EState *estate = mtstate->ps.state;
728 8204 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
729 : int whichrel;
730 : List *updateColnos;
731 :
732 : /*
733 : * Usually, mt_lastResultIndex matches the target rel. If it happens not
734 : * to, we can get the index the hard way with an integer division.
735 : */
736 8204 : whichrel = mtstate->mt_lastResultIndex;
737 8204 : if (resultRelInfo != mtstate->resultRelInfo + whichrel)
738 : {
739 0 : whichrel = resultRelInfo - mtstate->resultRelInfo;
740 : Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
741 : }
742 :
743 8204 : updateColnos = (List *) list_nth(mtstate->mt_updateColnosLists, whichrel);
744 :
745 : /*
746 : * For UPDATE, we use the old tuple to fill up missing values in the tuple
747 : * produced by the subplan to get the new tuple. We need two slots, both
748 : * matching the table's desired format.
749 : */
750 8204 : resultRelInfo->ri_oldTupleSlot =
751 8204 : table_slot_create(resultRelInfo->ri_RelationDesc,
752 : &estate->es_tupleTable);
753 8204 : resultRelInfo->ri_newTupleSlot =
754 8204 : table_slot_create(resultRelInfo->ri_RelationDesc,
755 : &estate->es_tupleTable);
756 :
757 : /* need an expression context to do the projection */
758 8204 : if (mtstate->ps.ps_ExprContext == NULL)
759 7342 : ExecAssignExprContext(estate, &mtstate->ps);
760 :
761 8204 : resultRelInfo->ri_projectNew =
762 8204 : ExecBuildUpdateProjection(subplan->targetlist,
763 : false, /* subplan did the evaluation */
764 : updateColnos,
765 : relDesc,
766 : mtstate->ps.ps_ExprContext,
767 : resultRelInfo->ri_newTupleSlot,
768 : &mtstate->ps);
769 :
770 8204 : resultRelInfo->ri_projectNewInfoValid = true;
771 8204 : }
772 :
773 : /*
774 : * ExecGetInsertNewTuple
775 : * This prepares a "new" tuple ready to be inserted into given result
776 : * relation, by removing any junk columns of the plan's output tuple
777 : * and (if necessary) coercing the tuple to the right tuple format.
778 : */
779 : static TupleTableSlot *
780 7440831 : ExecGetInsertNewTuple(ResultRelInfo *relinfo,
781 : TupleTableSlot *planSlot)
782 : {
783 7440831 : ProjectionInfo *newProj = relinfo->ri_projectNew;
784 : ExprContext *econtext;
785 :
786 : /*
787 : * If there's no projection to be done, just make sure the slot is of the
788 : * right type for the target rel. If the planSlot is the right type we
789 : * can use it as-is, else copy the data into ri_newTupleSlot.
790 : */
791 7440831 : if (newProj == NULL)
792 : {
793 7440831 : if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
794 : {
795 6927885 : ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
796 6927885 : return relinfo->ri_newTupleSlot;
797 : }
798 : else
799 512946 : return planSlot;
800 : }
801 :
802 : /*
803 : * Else project; since the projection output slot is ri_newTupleSlot, this
804 : * will also fix any slot-type problem.
805 : *
806 : * Note: currently, this is dead code, because INSERT cases don't receive
807 : * any junk columns so there's never a projection to be done.
808 : */
809 0 : econtext = newProj->pi_exprContext;
810 0 : econtext->ecxt_outertuple = planSlot;
811 0 : return ExecProject(newProj);
812 : }
813 :
814 : /*
815 : * ExecGetUpdateNewTuple
816 : * This prepares a "new" tuple by combining an UPDATE subplan's output
817 : * tuple (which contains values of changed columns) with unchanged
818 : * columns taken from the old tuple.
819 : *
820 : * The subplan tuple might also contain junk columns, which are ignored.
821 : * Note that the projection also ensures we have a slot of the right type.
822 : */
823 : TupleTableSlot *
824 169879 : ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
825 : TupleTableSlot *planSlot,
826 : TupleTableSlot *oldSlot)
827 : {
828 169879 : ProjectionInfo *newProj = relinfo->ri_projectNew;
829 : ExprContext *econtext;
830 :
831 : /* Use a few extra Asserts to protect against outside callers */
832 : Assert(relinfo->ri_projectNewInfoValid);
833 : Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
834 : Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));
835 :
836 169879 : econtext = newProj->pi_exprContext;
837 169879 : econtext->ecxt_outertuple = planSlot;
838 169879 : econtext->ecxt_scantuple = oldSlot;
839 169879 : return ExecProject(newProj);
840 : }
841 :
842 : /* ----------------------------------------------------------------
843 : * ExecInsert
844 : *
845 : * For INSERT, we have to insert the tuple into the target relation
846 : * (or partition thereof) and insert appropriate tuples into the index
847 : * relations.
848 : *
849 : * slot contains the new tuple value to be stored.
850 : *
851 : * Returns RETURNING result if any, otherwise NULL.
852 : * *inserted_tuple is the tuple that's effectively inserted;
853 : * *insert_destrel is the relation where it was inserted.
854 : * These are only set on success.
855 : *
856 : * This may change the currently active tuple conversion map in
857 : * mtstate->mt_transition_capture, so the callers must take care to
858 : * save the previous value to avoid losing track of it.
859 : * ----------------------------------------------------------------
860 : */
861 : static TupleTableSlot *
862 7442685 : ExecInsert(ModifyTableContext *context,
863 : ResultRelInfo *resultRelInfo,
864 : TupleTableSlot *slot,
865 : bool canSetTag,
866 : TupleTableSlot **inserted_tuple,
867 : ResultRelInfo **insert_destrel)
868 : {
869 7442685 : ModifyTableState *mtstate = context->mtstate;
870 7442685 : EState *estate = context->estate;
871 : Relation resultRelationDesc;
872 7442685 : List *recheckIndexes = NIL;
873 7442685 : TupleTableSlot *planSlot = context->planSlot;
874 7442685 : TupleTableSlot *result = NULL;
875 : TransitionCaptureState *ar_insert_trig_tcs;
876 7442685 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
877 7442685 : OnConflictAction onconflict = node->onConflictAction;
878 7442685 : PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
879 : MemoryContext oldContext;
880 :
881 : /*
882 : * If the input result relation is a partitioned table, find the leaf
883 : * partition to insert the tuple into.
884 : */
885 7442685 : if (proute)
886 : {
887 : ResultRelInfo *partRelInfo;
888 :
889 480774 : slot = ExecPrepareTupleRouting(mtstate, estate, proute,
890 : resultRelInfo, slot,
891 : &partRelInfo);
892 480630 : resultRelInfo = partRelInfo;
893 : }
894 :
895 7442541 : ExecMaterializeSlot(slot);
896 :
897 7442541 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
898 :
899 : /*
900 : * Open the table's indexes, if we have not done so already, so that we
901 : * can add new index entries for the inserted tuple.
902 : */
903 7442541 : if (resultRelationDesc->rd_rel->relhasindex &&
904 2107212 : resultRelInfo->ri_IndexRelationDescs == NULL)
905 19960 : ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);
906 :
907 : /*
908 : * BEFORE ROW INSERT Triggers.
909 : *
910 : * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
911 : * INSERT ... ON CONFLICT statement. We cannot check for constraint
912 : * violations before firing these triggers, because they can change the
913 : * values to insert. Also, they can run arbitrary user-defined code with
914 : * side-effects that we can't cancel by just not inserting the tuple.
915 : */
916 7442541 : if (resultRelInfo->ri_TrigDesc &&
917 350229 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)
918 : {
919 : /* Flush any pending inserts, so rows are visible to the triggers */
920 1398 : if (estate->es_insert_pending_result_relations != NIL)
921 3 : ExecPendingInserts(estate);
922 :
923 1398 : if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
924 131 : return NULL; /* "do nothing" */
925 : }
926 :
927 : /* INSTEAD OF ROW INSERT Triggers */
928 7442348 : if (resultRelInfo->ri_TrigDesc &&
929 350036 : resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
930 : {
931 111 : if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
932 4 : return NULL; /* "do nothing" */
933 : }
934 7442237 : else if (resultRelInfo->ri_FdwRoutine)
935 : {
936 : /*
937 : * GENERATED expressions might reference the tableoid column, so
938 : * (re-)initialize tts_tableOid before evaluating them.
939 : */
940 1010 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
941 :
942 : /*
943 : * Compute stored generated columns
944 : */
945 1010 : if (resultRelationDesc->rd_att->constr &&
946 179 : resultRelationDesc->rd_att->constr->has_generated_stored)
947 4 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
948 : CMD_INSERT);
949 :
950 : /*
951 : * If the FDW supports batching, and batching is requested, accumulate
952 : * rows and insert them in batches. Otherwise use the per-row inserts.
953 : */
954 1010 : if (resultRelInfo->ri_BatchSize > 1)
955 : {
956 145 : bool flushed = false;
957 :
958 : /*
959 : * When we've reached the desired batch size, perform the
960 : * insertion.
961 : */
962 145 : if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
963 : {
964 10 : ExecBatchInsert(mtstate, resultRelInfo,
965 : resultRelInfo->ri_Slots,
966 : resultRelInfo->ri_PlanSlots,
967 : resultRelInfo->ri_NumSlots,
968 : estate, canSetTag);
969 10 : flushed = true;
970 : }
971 :
972 145 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
973 :
974 145 : if (resultRelInfo->ri_Slots == NULL)
975 : {
976 15 : resultRelInfo->ri_Slots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
977 15 : resultRelInfo->ri_PlanSlots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
978 : }
979 :
980 : /*
981 : * Initialize the batch slots. We don't know how many slots will
982 : * be needed, so we initialize them as the batch grows, and we
983 : * keep them across batches. To mitigate an inefficiency in how
984 : * resource owner handles objects with many references (as with
985 : * many slots all referencing the same tuple descriptor) we copy
986 : * the appropriate tuple descriptor for each slot.
987 : */
988 145 : if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
989 : {
990 72 : TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
991 : TupleDesc plan_tdesc =
992 72 : CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
993 :
994 144 : resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
995 72 : MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
996 :
997 144 : resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
998 72 : MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
999 :
1000 : /* remember how many batch slots we initialized */
1001 72 : resultRelInfo->ri_NumSlotsInitialized++;
1002 : }
1003 :
1004 145 : ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
1005 : slot);
1006 :
1007 145 : ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
1008 : planSlot);
1009 :
1010 : /*
1011 : * If these are the first tuples stored in the buffers, add the
1012 : * target rel and the mtstate to the
1013 : * es_insert_pending_result_relations and
1014 : * es_insert_pending_modifytables lists respectively, except in
1015 : * the case where flushing was done above, in which case they
1016 : * would already have been added to the lists, so no need to do
1017 : * this.
1018 : */
1019 145 : if (resultRelInfo->ri_NumSlots == 0 && !flushed)
1020 : {
1021 : Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
1022 : resultRelInfo));
1023 19 : estate->es_insert_pending_result_relations =
1024 19 : lappend(estate->es_insert_pending_result_relations,
1025 : resultRelInfo);
1026 19 : estate->es_insert_pending_modifytables =
1027 19 : lappend(estate->es_insert_pending_modifytables, mtstate);
1028 : }
1029 : Assert(list_member_ptr(estate->es_insert_pending_result_relations,
1030 : resultRelInfo));
1031 :
1032 145 : resultRelInfo->ri_NumSlots++;
1033 :
1034 145 : MemoryContextSwitchTo(oldContext);
1035 :
1036 145 : return NULL;
1037 : }
1038 :
1039 : /*
1040 : * insert into foreign table: let the FDW do it
1041 : */
1042 865 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
1043 : resultRelInfo,
1044 : slot,
1045 : planSlot);
1046 :
1047 862 : if (slot == NULL) /* "do nothing" */
1048 2 : return NULL;
1049 :
1050 : /*
1051 : * AFTER ROW Triggers or RETURNING expressions might reference the
1052 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
1053 : * them. (This covers the case where the FDW replaced the slot.)
1054 : */
1055 860 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1056 : }
1057 : else
1058 : {
1059 : WCOKind wco_kind;
1060 :
1061 : /*
1062 : * Constraints and GENERATED expressions might reference the tableoid
1063 : * column, so (re-)initialize tts_tableOid before evaluating them.
1064 : */
1065 7441227 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1066 :
1067 : /*
1068 : * Compute stored generated columns
1069 : */
1070 7441227 : if (resultRelationDesc->rd_att->constr &&
1071 2358943 : resultRelationDesc->rd_att->constr->has_generated_stored)
1072 784 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
1073 : CMD_INSERT);
1074 :
1075 : /*
1076 : * Check any RLS WITH CHECK policies.
1077 : *
1078 : * Normally we should check INSERT policies. But if the insert is the
1079 : * result of a partition key update that moved the tuple to a new
1080 : * partition, we should instead check UPDATE policies, because we are
1081 : * executing policies defined on the target table, and not those
1082 : * defined on the child partitions.
1083 : *
1084 : * If we're running MERGE, we refer to the action that we're executing
1085 : * to know if we're doing an INSERT or UPDATE to a partition table.
1086 : */
1087 7441207 : if (mtstate->operation == CMD_UPDATE)
1088 521 : wco_kind = WCO_RLS_UPDATE_CHECK;
1089 7440686 : else if (mtstate->operation == CMD_MERGE)
1090 1177 : wco_kind = (mtstate->mt_merge_action->mas_action->commandType == CMD_UPDATE) ?
1091 1177 : WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
1092 : else
1093 7439509 : wco_kind = WCO_RLS_INSERT_CHECK;
1094 :
1095 : /*
1096 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
1097 : * we are looking for at this point.
1098 : */
1099 7441207 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1100 474 : ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
1101 :
1102 : /*
1103 : * Check the constraints of the tuple.
1104 : */
1105 7441079 : if (resultRelationDesc->rd_att->constr)
1106 2358851 : ExecConstraints(resultRelInfo, slot, estate);
1107 :
1108 : /*
1109 : * Also check the tuple against the partition constraint, if there is
1110 : * one; except that if we got here via tuple-routing, we don't need to
1111 : * if there's no BR trigger defined on the partition.
1112 : */
1113 7440598 : if (resultRelationDesc->rd_rel->relispartition &&
1114 483122 : (resultRelInfo->ri_RootResultRelInfo == NULL ||
1115 480278 : (resultRelInfo->ri_TrigDesc &&
1116 1113 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
1117 2982 : ExecPartitionCheck(resultRelInfo, slot, estate, true);
1118 :
1119 7440486 : if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
1120 2215 : {
1121 : /* Perform a speculative insertion. */
1122 : uint32 specToken;
1123 : ItemPointerData conflictTid;
1124 : ItemPointerData invalidItemPtr;
1125 : bool specConflict;
1126 : List *arbiterIndexes;
1127 :
1128 5292 : ItemPointerSetInvalid(&invalidItemPtr);
1129 5292 : arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
1130 :
1131 : /*
1132 : * Do a non-conclusive check for conflicts first.
1133 : *
1134 : * We're not holding any locks yet, so this doesn't guarantee that
1135 : * the later insert won't conflict. But it avoids leaving behind
1136 : * a lot of canceled speculative insertions, if you run a lot of
1137 : * INSERT ON CONFLICT statements that do conflict.
1138 : *
1139 : * We loop back here if we find a conflict below, either during
1140 : * the pre-check, or when we re-check after inserting the tuple
1141 : * speculatively. Better allow interrupts in case some bug makes
1142 : * this an infinite loop.
1143 : */
1144 14 : vlock:
1145 5306 : CHECK_FOR_INTERRUPTS();
1146 5306 : specConflict = false;
1147 5306 : if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
1148 : &conflictTid, &invalidItemPtr,
1149 : arbiterIndexes))
1150 : {
1151 : /* committed conflict tuple found */
1152 3072 : if (onconflict == ONCONFLICT_UPDATE)
1153 : {
1154 : /*
1155 : * In case of ON CONFLICT DO UPDATE, execute the UPDATE
1156 : * part. Be prepared to retry if the UPDATE fails because
1157 : * of another concurrent UPDATE/DELETE to the conflict
1158 : * tuple.
1159 : */
1160 2741 : TupleTableSlot *returning = NULL;
1161 :
1162 2741 : if (ExecOnConflictUpdate(context, resultRelInfo,
1163 : &conflictTid, slot, canSetTag,
1164 : &returning))
1165 : {
1166 2686 : InstrCountTuples2(&mtstate->ps, 1);
1167 2686 : return returning;
1168 : }
1169 : else
1170 3 : goto vlock;
1171 : }
1172 331 : else if (onconflict == ONCONFLICT_SELECT)
1173 : {
1174 : /*
1175 : * In case of ON CONFLICT DO SELECT, optionally lock the
1176 : * conflicting tuple, fetch it and project RETURNING on
1177 : * it. Be prepared to retry if locking fails because of a
1178 : * concurrent UPDATE/DELETE to the conflict tuple.
1179 : */
1180 192 : TupleTableSlot *returning = NULL;
1181 :
1182 192 : if (ExecOnConflictSelect(context, resultRelInfo,
1183 : &conflictTid, slot, canSetTag,
1184 : &returning))
1185 : {
1186 176 : InstrCountTuples2(&mtstate->ps, 1);
1187 176 : return returning;
1188 : }
1189 : else
1190 0 : goto vlock;
1191 : }
1192 : else
1193 : {
1194 : /*
1195 : * In case of ON CONFLICT DO NOTHING, do nothing. However,
1196 : * verify that the tuple is visible to the executor's MVCC
1197 : * snapshot at higher isolation levels.
1198 : *
1199 : * Using ExecGetReturningSlot() to store the tuple for the
1200 : * recheck isn't that pretty, but we can't trivially use
1201 : * the input slot, because it might not be of a compatible
1202 : * type. As there's no conflicting usage of
1203 : * ExecGetReturningSlot() in the DO NOTHING case...
1204 : */
1205 : Assert(onconflict == ONCONFLICT_NOTHING);
1206 139 : ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
1207 : ExecGetReturningSlot(estate, resultRelInfo));
1208 129 : InstrCountTuples2(&mtstate->ps, 1);
1209 129 : return NULL;
1210 : }
1211 : }
1212 :
1213 : /*
1214 : * Before we start insertion proper, acquire our "speculative
1215 : * insertion lock". Others can use that to wait for us to decide
1216 : * if we're going to go ahead with the insertion, instead of
1217 : * waiting for the whole transaction to complete.
1218 : */
1219 2230 : INJECTION_POINT("exec-insert-before-insert-speculative", NULL);
1220 2230 : specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
1221 :
1222 : /* insert the tuple, with the speculative token */
1223 2230 : table_tuple_insert_speculative(resultRelationDesc, slot,
1224 : estate->es_output_cid,
1225 : 0,
1226 : NULL,
1227 : specToken);
1228 :
1229 : /* insert index entries for tuple */
1230 2230 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1231 : estate, EIIT_NO_DUPE_ERROR,
1232 : slot, arbiterIndexes,
1233 : &specConflict);
1234 :
1235 : /* adjust the tuple's state accordingly */
1236 2226 : table_tuple_complete_speculative(resultRelationDesc, slot,
1237 2226 : specToken, !specConflict);
1238 :
1239 : /*
1240 : * Wake up anyone waiting for our decision. They will re-check
1241 : * the tuple, see that it's no longer speculative, and wait on our
1242 : * XID as if this was a regularly inserted tuple all along. Or if
1243 : * we killed the tuple, they will see it's dead, and proceed as if
1244 : * the tuple never existed.
1245 : */
1246 2226 : SpeculativeInsertionLockRelease(GetCurrentTransactionId());
1247 :
1248 : /*
1249 : * If there was a conflict, start from the beginning. We'll do
1250 : * the pre-check again, which will now find the conflicting tuple
1251 : * (unless it aborts before we get there).
1252 : */
1253 2226 : if (specConflict)
1254 : {
1255 11 : list_free(recheckIndexes);
1256 11 : goto vlock;
1257 : }
1258 :
1259 : /* Since there was no insertion conflict, we're done */
1260 : }
1261 : else
1262 : {
1263 : /* insert the tuple normally */
1264 7435194 : table_tuple_insert(resultRelationDesc, slot,
1265 : estate->es_output_cid,
1266 : 0, NULL);
1267 :
1268 : /* insert index entries for tuple */
1269 7435176 : if (resultRelInfo->ri_NumIndices > 0)
1270 2101543 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo, estate,
1271 : 0, slot, NIL,
1272 : NULL);
1273 : }
1274 : }
1275 :
1276 7437995 : if (canSetTag)
1277 7437199 : (estate->es_processed)++;
1278 :
1279 : /*
1280 : * If this insert is the result of a partition key update that moved the
1281 : * tuple to a new partition, put this row into the transition NEW TABLE,
1282 : * if there is one. We need to do this separately for DELETE and INSERT
1283 : * because they happen on different tables.
1284 : */
1285 7437995 : ar_insert_trig_tcs = mtstate->mt_transition_capture;
1286 7437995 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
1287 36 : && mtstate->mt_transition_capture->tcs_update_new_table)
1288 : {
1289 32 : ExecARUpdateTriggers(estate, resultRelInfo,
1290 : NULL, NULL,
1291 : NULL,
1292 : NULL,
1293 : slot,
1294 : NULL,
1295 32 : mtstate->mt_transition_capture,
1296 : false);
1297 :
1298 : /*
1299 : * We've already captured the NEW TABLE row, so make sure any AR
1300 : * INSERT trigger fired below doesn't capture it again.
1301 : */
1302 32 : ar_insert_trig_tcs = NULL;
1303 : }
1304 :
1305 : /* AFTER ROW INSERT Triggers */
1306 7437995 : ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
1307 : ar_insert_trig_tcs);
1308 :
1309 7437994 : list_free(recheckIndexes);
1310 :
1311 : /*
1312 : * Check any WITH CHECK OPTION constraints from parent views. We are
1313 : * required to do this after testing all constraints and uniqueness
1314 : * violations per the SQL spec, so we do it after actually inserting the
1315 : * record into the heap and all indexes.
1316 : *
1317 : * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
1318 : * tuple will never be seen, if it violates the WITH CHECK OPTION.
1319 : *
1320 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1321 : * are looking for at this point.
1322 : */
1323 7437994 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1324 294 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1325 :
1326 : /* Process RETURNING if present */
1327 7437898 : if (resultRelInfo->ri_projectReturning)
1328 : {
1329 2720 : TupleTableSlot *oldSlot = NULL;
1330 :
1331 : /*
1332 : * If this is part of a cross-partition UPDATE, and the RETURNING list
1333 : * refers to any OLD columns, ExecDelete() will have saved the tuple
1334 : * deleted from the original partition, which we must use here to
1335 : * compute the OLD column values. Otherwise, all OLD column values
1336 : * will be NULL.
1337 : */
1338 2720 : if (context->cpDeletedSlot)
1339 : {
1340 : TupleConversionMap *tupconv_map;
1341 :
1342 : /*
1343 : * Convert the OLD tuple to the new partition's format/slot, if
1344 : * needed. Note that ExecDelete() already converted it to the
1345 : * root's partition's format/slot.
1346 : */
1347 30 : oldSlot = context->cpDeletedSlot;
1348 30 : tupconv_map = ExecGetRootToChildMap(resultRelInfo, estate);
1349 30 : if (tupconv_map != NULL)
1350 : {
1351 10 : oldSlot = execute_attr_map_slot(tupconv_map->attrMap,
1352 : oldSlot,
1353 : ExecGetReturningSlot(estate,
1354 : resultRelInfo));
1355 :
1356 10 : oldSlot->tts_tableOid = context->cpDeletedSlot->tts_tableOid;
1357 10 : ItemPointerCopy(&context->cpDeletedSlot->tts_tid, &oldSlot->tts_tid);
1358 : }
1359 : }
1360 :
1361 2720 : result = ExecProcessReturning(context, resultRelInfo, false,
1362 : oldSlot, slot, planSlot);
1363 :
1364 : /*
1365 : * For a cross-partition UPDATE, release the old tuple, first making
1366 : * sure that the result slot has a local copy of any pass-by-reference
1367 : * values.
1368 : */
1369 2712 : if (context->cpDeletedSlot)
1370 : {
1371 30 : ExecMaterializeSlot(result);
1372 30 : ExecClearTuple(oldSlot);
1373 30 : if (context->cpDeletedSlot != oldSlot)
1374 10 : ExecClearTuple(context->cpDeletedSlot);
1375 30 : context->cpDeletedSlot = NULL;
1376 : }
1377 : }
1378 :
1379 7437890 : if (inserted_tuple)
1380 537 : *inserted_tuple = slot;
1381 7437890 : if (insert_destrel)
1382 537 : *insert_destrel = resultRelInfo;
1383 :
1384 7437890 : return result;
1385 : }
1386 :
1387 : /* ----------------------------------------------------------------
1388 : * ExecBatchInsert
1389 : *
1390 : * Insert multiple tuples in an efficient way.
1391 : * Currently, this handles inserting into a foreign table without
1392 : * RETURNING clause.
1393 : * ----------------------------------------------------------------
1394 : */
1395 : static void
1396 29 : ExecBatchInsert(ModifyTableState *mtstate,
1397 : ResultRelInfo *resultRelInfo,
1398 : TupleTableSlot **slots,
1399 : TupleTableSlot **planSlots,
1400 : int numSlots,
1401 : EState *estate,
1402 : bool canSetTag)
1403 : {
1404 : int i;
1405 29 : int numInserted = numSlots;
1406 29 : TupleTableSlot *slot = NULL;
1407 : TupleTableSlot **rslots;
1408 :
1409 : /*
1410 : * insert into foreign table: let the FDW do it
1411 : */
1412 29 : rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1413 : resultRelInfo,
1414 : slots,
1415 : planSlots,
1416 : &numInserted);
1417 :
1418 173 : for (i = 0; i < numInserted; i++)
1419 : {
1420 145 : slot = rslots[i];
1421 :
1422 : /*
1423 : * AFTER ROW Triggers might reference the tableoid column, so
1424 : * (re-)initialize tts_tableOid before evaluating them.
1425 : */
1426 145 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1427 :
1428 : /* AFTER ROW INSERT Triggers */
1429 145 : ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1430 145 : mtstate->mt_transition_capture);
1431 :
1432 : /*
1433 : * Check any WITH CHECK OPTION constraints from parent views. See the
1434 : * comment in ExecInsert.
1435 : */
1436 144 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1437 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1438 : }
1439 :
1440 28 : if (canSetTag && numInserted > 0)
1441 28 : estate->es_processed += numInserted;
1442 :
1443 : /* Clean up all the slots, ready for the next batch */
1444 172 : for (i = 0; i < numSlots; i++)
1445 : {
1446 144 : ExecClearTuple(slots[i]);
1447 144 : ExecClearTuple(planSlots[i]);
1448 : }
1449 28 : resultRelInfo->ri_NumSlots = 0;
1450 28 : }
1451 :
1452 : /*
1453 : * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1454 : */
1455 : static void
1456 18 : ExecPendingInserts(EState *estate)
1457 : {
1458 : ListCell *l1,
1459 : *l2;
1460 :
1461 36 : forboth(l1, estate->es_insert_pending_result_relations,
1462 : l2, estate->es_insert_pending_modifytables)
1463 : {
1464 19 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1465 19 : ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1466 :
1467 : Assert(mtstate);
1468 19 : ExecBatchInsert(mtstate, resultRelInfo,
1469 : resultRelInfo->ri_Slots,
1470 : resultRelInfo->ri_PlanSlots,
1471 : resultRelInfo->ri_NumSlots,
1472 19 : estate, mtstate->canSetTag);
1473 : }
1474 :
1475 17 : list_free(estate->es_insert_pending_result_relations);
1476 17 : list_free(estate->es_insert_pending_modifytables);
1477 17 : estate->es_insert_pending_result_relations = NIL;
1478 17 : estate->es_insert_pending_modifytables = NIL;
1479 17 : }
1480 :
1481 : /*
1482 : * ExecDeletePrologue -- subroutine for ExecDelete
1483 : *
1484 : * Prepare executor state for DELETE. Actually, the only thing we have to do
1485 : * here is execute BEFORE ROW triggers. We return false if one of them makes
1486 : * the delete a no-op; otherwise, return true.
1487 : */
1488 : static bool
1489 1011691 : ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1490 : ItemPointer tupleid, HeapTuple oldtuple,
1491 : TupleTableSlot **epqreturnslot, TM_Result *result)
1492 : {
1493 1011691 : if (result)
1494 1034 : *result = TM_Ok;
1495 :
1496 : /* BEFORE ROW DELETE triggers */
1497 1011691 : if (resultRelInfo->ri_TrigDesc &&
1498 4659 : resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1499 : {
1500 : /* Flush any pending inserts, so rows are visible to the triggers */
1501 209 : if (context->estate->es_insert_pending_result_relations != NIL)
1502 1 : ExecPendingInserts(context->estate);
1503 :
1504 199 : return ExecBRDeleteTriggers(context->estate, context->epqstate,
1505 : resultRelInfo, tupleid, oldtuple,
1506 : epqreturnslot, result, &context->tmfd,
1507 209 : context->mtstate->operation == CMD_MERGE);
1508 : }
1509 :
1510 1011482 : return true;
1511 : }
1512 :
1513 : /*
1514 : * ExecDeleteAct -- subroutine for ExecDelete
1515 : *
1516 : * Actually delete the tuple from a plain table.
1517 : *
1518 : * Caller is in charge of doing EvalPlanQual as necessary
1519 : */
1520 : static TM_Result
1521 1011583 : ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1522 : ItemPointer tupleid, bool changingPart)
1523 : {
1524 1011583 : EState *estate = context->estate;
1525 :
1526 1011583 : return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1527 : estate->es_output_cid,
1528 : estate->es_snapshot,
1529 : estate->es_crosscheck_snapshot,
1530 : true /* wait for commit */ ,
1531 : &context->tmfd,
1532 : changingPart);
1533 : }
1534 :
1535 : /*
1536 : * ExecDeleteEpilogue -- subroutine for ExecDelete
1537 : *
1538 : * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1539 : * including the UPDATE triggers if the deletion is being done as part of a
1540 : * cross-partition tuple move.
1541 : */
1542 : static void
1543 1011554 : ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1544 : ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1545 : {
1546 1011554 : ModifyTableState *mtstate = context->mtstate;
1547 1011554 : EState *estate = context->estate;
1548 : TransitionCaptureState *ar_delete_trig_tcs;
1549 :
1550 : /*
1551 : * If this delete is the result of a partition key update that moved the
1552 : * tuple to a new partition, put this row into the transition OLD TABLE,
1553 : * if there is one. We need to do this separately for DELETE and INSERT
1554 : * because they happen on different tables.
1555 : */
1556 1011554 : ar_delete_trig_tcs = mtstate->mt_transition_capture;
1557 1011554 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1558 36 : mtstate->mt_transition_capture->tcs_update_old_table)
1559 : {
1560 32 : ExecARUpdateTriggers(estate, resultRelInfo,
1561 : NULL, NULL,
1562 : tupleid, oldtuple,
1563 32 : NULL, NULL, mtstate->mt_transition_capture,
1564 : false);
1565 :
1566 : /*
1567 : * We've already captured the OLD TABLE row, so make sure any AR
1568 : * DELETE trigger fired below doesn't capture it again.
1569 : */
1570 32 : ar_delete_trig_tcs = NULL;
1571 : }
1572 :
1573 : /* AFTER ROW DELETE Triggers */
1574 1011554 : ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1575 : ar_delete_trig_tcs, changingPart);
1576 1011552 : }
1577 :
1578 : /* ----------------------------------------------------------------
1579 : * ExecDelete
1580 : *
1581 : * DELETE is like UPDATE, except that we delete the tuple and no
1582 : * index modifications are needed.
1583 : *
1584 : * When deleting from a table, tupleid identifies the tuple to delete and
1585 : * oldtuple is NULL. When deleting through a view INSTEAD OF trigger,
1586 : * oldtuple is passed to the triggers and identifies what to delete, and
1587 : * tupleid is invalid. When deleting from a foreign table, tupleid is
1588 : * invalid; the FDW has to figure out which row to delete using data from
1589 : * the planSlot. oldtuple is passed to foreign table triggers; it is
1590 : * NULL when the foreign table has no relevant triggers. We use
1591 : * tupleDeleted to indicate whether the tuple is actually deleted,
1592 : * callers can use it to decide whether to continue the operation. When
1593 : * this DELETE is a part of an UPDATE of partition-key, then the slot
1594 : * returned by EvalPlanQual() is passed back using output parameter
1595 : * epqreturnslot.
1596 : *
1597 : * Returns RETURNING result if any, otherwise NULL.
1598 : * ----------------------------------------------------------------
1599 : */
1600 : static TupleTableSlot *
1601 1011347 : ExecDelete(ModifyTableContext *context,
1602 : ResultRelInfo *resultRelInfo,
1603 : ItemPointer tupleid,
1604 : HeapTuple oldtuple,
1605 : bool processReturning,
1606 : bool changingPart,
1607 : bool canSetTag,
1608 : TM_Result *tmresult,
1609 : bool *tupleDeleted,
1610 : TupleTableSlot **epqreturnslot)
1611 : {
1612 1011347 : EState *estate = context->estate;
1613 1011347 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1614 1011347 : TupleTableSlot *slot = NULL;
1615 : TM_Result result;
1616 : bool saveOld;
1617 :
1618 1011347 : if (tupleDeleted)
1619 690 : *tupleDeleted = false;
1620 :
1621 : /*
1622 : * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1623 : * done if it says we are.
1624 : */
1625 1011347 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1626 : epqreturnslot, tmresult))
1627 33 : return NULL;
1628 :
1629 : /* INSTEAD OF ROW DELETE Triggers */
1630 1011304 : if (resultRelInfo->ri_TrigDesc &&
1631 4572 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
1632 31 : {
1633 : bool dodelete;
1634 :
1635 : Assert(oldtuple != NULL);
1636 35 : dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1637 :
1638 35 : if (!dodelete) /* "do nothing" */
1639 4 : return NULL;
1640 : }
1641 1011269 : else if (resultRelInfo->ri_FdwRoutine)
1642 : {
1643 : /*
1644 : * delete from foreign table: let the FDW do it
1645 : *
1646 : * We offer the returning slot as a place to store RETURNING data,
1647 : * although the FDW can return some other slot if it wants.
1648 : */
1649 23 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1650 23 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1651 : resultRelInfo,
1652 : slot,
1653 : context->planSlot);
1654 :
1655 23 : if (slot == NULL) /* "do nothing" */
1656 0 : return NULL;
1657 :
1658 : /*
1659 : * RETURNING expressions might reference the tableoid column, so
1660 : * (re)initialize tts_tableOid before evaluating them.
1661 : */
1662 23 : if (TTS_EMPTY(slot))
1663 5 : ExecStoreAllNullTuple(slot);
1664 :
1665 23 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1666 : }
1667 : else
1668 : {
1669 : /*
1670 : * delete the tuple
1671 : *
1672 : * Note: if context->estate->es_crosscheck_snapshot isn't
1673 : * InvalidSnapshot, we check that the row to be deleted is visible to
1674 : * that snapshot, and throw a can't-serialize error if not. This is a
1675 : * special-case behavior needed for referential integrity updates in
1676 : * transaction-snapshot mode transactions.
1677 : */
1678 1011246 : ldelete:
1679 1011250 : result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1680 :
1681 1011232 : if (tmresult)
1682 668 : *tmresult = result;
1683 :
1684 1011232 : switch (result)
1685 : {
1686 20 : case TM_SelfModified:
1687 :
1688 : /*
1689 : * The target tuple was already updated or deleted by the
1690 : * current command, or by a later command in the current
1691 : * transaction. The former case is possible in a join DELETE
1692 : * where multiple tuples join to the same target tuple. This
1693 : * is somewhat questionable, but Postgres has always allowed
1694 : * it: we just ignore additional deletion attempts.
1695 : *
1696 : * The latter case arises if the tuple is modified by a
1697 : * command in a BEFORE trigger, or perhaps by a command in a
1698 : * volatile function used in the query. In such situations we
1699 : * should not ignore the deletion, but it is equally unsafe to
1700 : * proceed. We don't want to discard the original DELETE
1701 : * while keeping the triggered actions based on its deletion;
1702 : * and it would be no better to allow the original DELETE
1703 : * while discarding updates that it triggered. The row update
1704 : * carries some information that might be important according
1705 : * to business rules; so throwing an error is the only safe
1706 : * course.
1707 : *
1708 : * If a trigger actually intends this type of interaction, it
1709 : * can re-execute the DELETE and then return NULL to cancel
1710 : * the outer delete.
1711 : */
1712 20 : if (context->tmfd.cmax != estate->es_output_cid)
1713 4 : ereport(ERROR,
1714 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1715 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1716 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1717 :
1718 : /* Else, already deleted by self; nothing to do */
1719 16 : return NULL;
1720 :
1721 1011174 : case TM_Ok:
1722 1011174 : break;
1723 :
1724 35 : case TM_Updated:
1725 : {
1726 : TupleTableSlot *inputslot;
1727 : TupleTableSlot *epqslot;
1728 :
1729 35 : if (IsolationUsesXactSnapshot())
1730 1 : ereport(ERROR,
1731 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1732 : errmsg("could not serialize access due to concurrent update")));
1733 :
1734 : /*
1735 : * Already know that we're going to need to do EPQ, so
1736 : * fetch tuple directly into the right slot.
1737 : */
1738 34 : EvalPlanQualBegin(context->epqstate);
1739 34 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1740 : resultRelInfo->ri_RangeTableIndex);
1741 :
1742 34 : result = table_tuple_lock(resultRelationDesc, tupleid,
1743 : estate->es_snapshot,
1744 : inputslot, estate->es_output_cid,
1745 : LockTupleExclusive, LockWaitBlock,
1746 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1747 : &context->tmfd);
1748 :
1749 30 : switch (result)
1750 : {
1751 27 : case TM_Ok:
1752 : Assert(context->tmfd.traversed);
1753 27 : epqslot = EvalPlanQual(context->epqstate,
1754 : resultRelationDesc,
1755 : resultRelInfo->ri_RangeTableIndex,
1756 : inputslot);
1757 27 : if (TupIsNull(epqslot))
1758 : /* Tuple not passing quals anymore, exiting... */
1759 15 : return NULL;
1760 :
1761 : /*
1762 : * If requested, skip delete and pass back the
1763 : * updated row.
1764 : */
1765 12 : if (epqreturnslot)
1766 : {
1767 8 : *epqreturnslot = epqslot;
1768 8 : return NULL;
1769 : }
1770 : else
1771 4 : goto ldelete;
1772 :
1773 2 : case TM_SelfModified:
1774 :
1775 : /*
1776 : * This can be reached when following an update
1777 : * chain from a tuple updated by another session,
1778 : * reaching a tuple that was already updated in
1779 : * this transaction. If previously updated by this
1780 : * command, ignore the delete, otherwise error
1781 : * out.
1782 : *
1783 : * See also TM_SelfModified response to
1784 : * table_tuple_delete() above.
1785 : */
1786 2 : if (context->tmfd.cmax != estate->es_output_cid)
1787 1 : ereport(ERROR,
1788 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1789 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1790 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1791 1 : return NULL;
1792 :
1793 1 : case TM_Deleted:
1794 : /* tuple already deleted; nothing to do */
1795 1 : return NULL;
1796 :
1797 0 : default:
1798 :
1799 : /*
1800 : * TM_Invisible should be impossible because we're
1801 : * waiting for updated row versions, and would
1802 : * already have errored out if the first version
1803 : * is invisible.
1804 : *
1805 : * TM_Updated should be impossible, because we're
1806 : * locking the latest version via
1807 : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
1808 : */
1809 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
1810 : result);
1811 : return NULL;
1812 : }
1813 :
1814 : Assert(false);
1815 : break;
1816 : }
1817 :
1818 3 : case TM_Deleted:
1819 3 : if (IsolationUsesXactSnapshot())
1820 0 : ereport(ERROR,
1821 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1822 : errmsg("could not serialize access due to concurrent delete")));
1823 : /* tuple already deleted; nothing to do */
1824 3 : return NULL;
1825 :
1826 0 : default:
1827 0 : elog(ERROR, "unrecognized table_tuple_delete status: %u",
1828 : result);
1829 : return NULL;
1830 : }
1831 :
1832 : /*
1833 : * Note: Normally one would think that we have to delete index tuples
1834 : * associated with the heap tuple now...
1835 : *
1836 : * ... but in POSTGRES, we have no need to do this because VACUUM will
1837 : * take care of it later. We can't delete index tuples immediately
1838 : * anyway, since the tuple is still visible to other transactions.
1839 : */
1840 : }
1841 :
1842 1011228 : if (canSetTag)
1843 1010439 : (estate->es_processed)++;
1844 :
1845 : /* Tell caller that the delete actually happened. */
1846 1011228 : if (tupleDeleted)
1847 638 : *tupleDeleted = true;
1848 :
1849 1011228 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
1850 :
1851 : /*
1852 : * Process RETURNING if present and if requested.
1853 : *
1854 : * If this is part of a cross-partition UPDATE, and the RETURNING list
1855 : * refers to any OLD column values, save the old tuple here for later
1856 : * processing of the RETURNING list by ExecInsert().
1857 : */
1858 1011321 : saveOld = changingPart && resultRelInfo->ri_projectReturning &&
1859 95 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD;
1860 :
1861 1011226 : if (resultRelInfo->ri_projectReturning && (processReturning || saveOld))
1862 : {
1863 : /*
1864 : * We have to put the target tuple into a slot, which means first we
1865 : * gotta fetch it. We can use the trigger tuple slot.
1866 : */
1867 : TupleTableSlot *rslot;
1868 :
1869 616 : if (resultRelInfo->ri_FdwRoutine)
1870 : {
1871 : /* FDW must have provided a slot containing the deleted row */
1872 : Assert(!TupIsNull(slot));
1873 : }
1874 : else
1875 : {
1876 609 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1877 609 : if (oldtuple != NULL)
1878 : {
1879 16 : ExecForceStoreHeapTuple(oldtuple, slot, false);
1880 : }
1881 : else
1882 : {
1883 593 : if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
1884 : SnapshotAny, slot))
1885 0 : elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1886 : }
1887 : }
1888 :
1889 : /*
1890 : * If required, save the old tuple for later processing of the
1891 : * RETURNING list by ExecInsert().
1892 : */
1893 616 : if (saveOld)
1894 : {
1895 : TupleConversionMap *tupconv_map;
1896 :
1897 : /*
1898 : * Convert the tuple into the root partition's format/slot, if
1899 : * needed. ExecInsert() will then convert it to the new
1900 : * partition's format/slot, if necessary.
1901 : */
1902 30 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1903 30 : if (tupconv_map != NULL)
1904 : {
1905 12 : ResultRelInfo *rootRelInfo = context->mtstate->rootResultRelInfo;
1906 12 : TupleTableSlot *oldSlot = slot;
1907 :
1908 12 : slot = execute_attr_map_slot(tupconv_map->attrMap,
1909 : slot,
1910 : ExecGetReturningSlot(estate,
1911 : rootRelInfo));
1912 :
1913 12 : slot->tts_tableOid = oldSlot->tts_tableOid;
1914 12 : ItemPointerCopy(&oldSlot->tts_tid, &slot->tts_tid);
1915 : }
1916 :
1917 30 : context->cpDeletedSlot = slot;
1918 :
1919 30 : return NULL;
1920 : }
1921 :
1922 586 : rslot = ExecProcessReturning(context, resultRelInfo, true,
1923 : slot, NULL, context->planSlot);
1924 :
1925 : /*
1926 : * Before releasing the target tuple again, make sure rslot has a
1927 : * local copy of any pass-by-reference values.
1928 : */
1929 586 : ExecMaterializeSlot(rslot);
1930 :
1931 586 : ExecClearTuple(slot);
1932 :
1933 586 : return rslot;
1934 : }
1935 :
1936 1010610 : return NULL;
1937 : }
1938 :
/*
 * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
 *
 * This works by first deleting the old tuple from the current partition,
 * followed by inserting the new tuple into the root parent table, that is,
 * mtstate->rootResultRelInfo.  It will be re-routed from there to the
 * correct partition.
 *
 * Returns true if the tuple has been successfully moved, or if it's found
 * that the tuple was concurrently deleted so there's nothing more to do
 * for the caller.
 *
 * False is returned if the tuple we're trying to move is found to have been
 * concurrently updated.  In that case, the caller must check if the updated
 * tuple that's returned in *retry_slot still needs to be re-routed, and call
 * this function again or perform a regular update accordingly.  For MERGE,
 * the updated tuple is not returned in *retry_slot; it has its own retry
 * logic.
 *
 * Output parameters:
 *	*tmresult receives the TM_Result of the DELETE step (via ExecDelete).
 *	*retry_slot receives the re-projected new tuple when a retry is needed
 *		(non-MERGE case only).
 *	*inserted_tuple and *insert_destrel receive the tuple actually inserted
 *		and the partition it landed in, when the INSERT step runs.
 */
static bool
ExecCrossPartitionUpdate(ModifyTableContext *context,
						 ResultRelInfo *resultRelInfo,
						 ItemPointer tupleid, HeapTuple oldtuple,
						 TupleTableSlot *slot,
						 bool canSetTag,
						 UpdateContext *updateCxt,
						 TM_Result *tmresult,
						 TupleTableSlot **retry_slot,
						 TupleTableSlot **inserted_tuple,
						 ResultRelInfo **insert_destrel)
{
	ModifyTableState *mtstate = context->mtstate;
	EState	   *estate = mtstate->ps.state;
	TupleConversionMap *tupconv_map;
	bool		tuple_deleted;
	TupleTableSlot *epqslot = NULL;

	/* Reset per-call output state in the shared context. */
	context->cpDeletedSlot = NULL;
	context->cpUpdateReturningSlot = NULL;
	*retry_slot = NULL;

	/*
	 * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
	 * to migrate to a different partition.  Maybe this can be implemented
	 * some day, but it seems a fringe feature with little redeeming value.
	 */
	if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("invalid ON UPDATE specification"),
				 errdetail("The result tuple would appear in a different partition than the original tuple.")));

	/*
	 * When an UPDATE is run directly on a leaf partition, simply fail with a
	 * partition constraint violation error.
	 */
	if (resultRelInfo == mtstate->rootResultRelInfo)
		ExecPartitionCheckEmitError(resultRelInfo, slot, estate);

	/* Initialize tuple routing info if not already done. */
	if (mtstate->mt_partition_tuple_routing == NULL)
	{
		Relation	rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
		MemoryContext oldcxt;

		/* Things built here have to last for the query duration. */
		oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);

		mtstate->mt_partition_tuple_routing =
			ExecSetupPartitionTupleRouting(estate, rootRel);

		/*
		 * Before a partition's tuple can be re-routed, it must first be
		 * converted to the root's format, so we'll need a slot for storing
		 * such tuples.
		 */
		Assert(mtstate->mt_root_tuple_slot == NULL);
		mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);

		MemoryContextSwitchTo(oldcxt);
	}

	/*
	 * Row movement, part 1.  Delete the tuple, but skip RETURNING processing.
	 * We want to return rows from INSERT.
	 */
	ExecDelete(context, resultRelInfo,
			   tupleid, oldtuple,
			   false,			/* processReturning */
			   true,			/* changingPart */
			   false,			/* canSetTag */
			   tmresult, &tuple_deleted, &epqslot);

	/*
	 * For some reason if DELETE didn't happen (e.g. trigger prevented it, or
	 * it was already deleted by self, or it was concurrently deleted by
	 * another transaction), then we should skip the insert as well;
	 * otherwise, an UPDATE could cause an increase in the total number of
	 * rows across all partitions, which is clearly wrong.
	 *
	 * For a normal UPDATE, the case where the tuple has been the subject of a
	 * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
	 * machinery, but for an UPDATE that we've translated into a DELETE from
	 * this partition and an INSERT into some other partition, that's not
	 * available, because CTID chains can't span relation boundaries.  We
	 * mimic the semantics to a limited extent by skipping the INSERT if the
	 * DELETE fails to find a tuple.  This ensures that two concurrent
	 * attempts to UPDATE the same tuple at the same time can't turn one tuple
	 * into two, and that an UPDATE of a just-deleted tuple can't resurrect
	 * it.
	 */
	if (!tuple_deleted)
	{
		/*
		 * epqslot will be typically NULL.  But when ExecDelete() finds that
		 * another transaction has concurrently updated the same row, it
		 * re-fetches the row, skips the delete, and epqslot is set to the
		 * re-fetched tuple slot.  In that case, we need to do all the checks
		 * again.  For MERGE, we leave everything to the caller (it must do
		 * additional rechecking, and might end up executing a different
		 * action entirely).
		 */
		if (mtstate->operation == CMD_MERGE)
			return *tmresult == TM_Ok;
		else if (TupIsNull(epqslot))
			return true;
		else
		{
			/* Fetch the most recent version of old tuple. */
			TupleTableSlot *oldSlot;

			/* ... but first, make sure ri_oldTupleSlot is initialized. */
			if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
				ExecInitUpdateProjection(mtstate, resultRelInfo);
			oldSlot = resultRelInfo->ri_oldTupleSlot;
			if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
											   tupleid,
											   SnapshotAny,
											   oldSlot))
				elog(ERROR, "failed to fetch tuple being updated");
			/* and project the new tuple to retry the UPDATE with */
			*retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
												oldSlot);
			return false;
		}
	}

	/*
	 * resultRelInfo is one of the per-relation resultRelInfos.  So we should
	 * convert the tuple into root's tuple descriptor if needed, since
	 * ExecInsert() starts the search from root.
	 */
	tupconv_map = ExecGetChildToRootMap(resultRelInfo);
	if (tupconv_map != NULL)
		slot = execute_attr_map_slot(tupconv_map->attrMap,
									 slot,
									 mtstate->mt_root_tuple_slot);

	/* Tuple routing starts from the root table. */
	context->cpUpdateReturningSlot =
		ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
				   inserted_tuple, insert_destrel);

	/*
	 * Reset the transition state that may possibly have been written by
	 * INSERT.
	 */
	if (mtstate->mt_transition_capture)
		mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;

	/* We're done moving. */
	return true;
}
2112 :
2113 : /*
2114 : * ExecUpdatePrologue -- subroutine for ExecUpdate
2115 : *
2116 : * Prepare executor state for UPDATE. This includes running BEFORE ROW
2117 : * triggers. We return false if one of them makes the update a no-op;
2118 : * otherwise, return true.
2119 : */
2120 : static bool
2121 173918 : ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2122 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2123 : TM_Result *result)
2124 : {
2125 173918 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2126 :
2127 173918 : if (result)
2128 1413 : *result = TM_Ok;
2129 :
2130 173918 : ExecMaterializeSlot(slot);
2131 :
2132 : /*
2133 : * Open the table's indexes, if we have not done so already, so that we
2134 : * can add new index entries for the updated tuple.
2135 : */
2136 173918 : if (resultRelationDesc->rd_rel->relhasindex &&
2137 125039 : resultRelInfo->ri_IndexRelationDescs == NULL)
2138 5416 : ExecOpenIndices(resultRelInfo, false);
2139 :
2140 : /* BEFORE ROW UPDATE triggers */
2141 173918 : if (resultRelInfo->ri_TrigDesc &&
2142 3945 : resultRelInfo->ri_TrigDesc->trig_update_before_row)
2143 : {
2144 : /* Flush any pending inserts, so rows are visible to the triggers */
2145 1557 : if (context->estate->es_insert_pending_result_relations != NIL)
2146 1 : ExecPendingInserts(context->estate);
2147 :
2148 1545 : return ExecBRUpdateTriggers(context->estate, context->epqstate,
2149 : resultRelInfo, tupleid, oldtuple, slot,
2150 : result, &context->tmfd,
2151 1557 : context->mtstate->operation == CMD_MERGE);
2152 : }
2153 :
2154 172361 : return true;
2155 : }
2156 :
2157 : /*
2158 : * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
2159 : *
2160 : * Apply the final modifications to the tuple slot before the update.
2161 : * (This is split out because we also need it in the foreign-table code path.)
2162 : */
2163 : static void
2164 173731 : ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
2165 : TupleTableSlot *slot,
2166 : EState *estate)
2167 : {
2168 173731 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2169 :
2170 : /*
2171 : * Constraints and GENERATED expressions might reference the tableoid
2172 : * column, so (re-)initialize tts_tableOid before evaluating them.
2173 : */
2174 173731 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2175 :
2176 : /*
2177 : * Compute stored generated columns
2178 : */
2179 173731 : if (resultRelationDesc->rd_att->constr &&
2180 102033 : resultRelationDesc->rd_att->constr->has_generated_stored)
2181 158 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
2182 : CMD_UPDATE);
2183 173731 : }
2184 :
/*
 * ExecUpdateAct -- subroutine for ExecUpdate
 *
 * Actually update the tuple, when operating on a plain table.  If the
 * table is a partition, and the command was called referencing an ancestor
 * partitioned table, this routine migrates the resulting tuple to another
 * partition.
 *
 * The caller is in charge of keeping indexes current as necessary.  The
 * caller is also in charge of doing EvalPlanQual if the tuple is found to
 * be concurrently updated.  However, in case of a cross-partition update,
 * this routine does it.
 *
 * Returns the TM_Result of table_tuple_update() (or of the cross-partition
 * machinery); on success, updateCxt is filled with lockmode/updateIndexes
 * info the caller needs for index maintenance.
 */
static TM_Result
ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
			  ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
			  bool canSetTag, UpdateContext *updateCxt)
{
	EState	   *estate = context->estate;
	Relation	resultRelationDesc = resultRelInfo->ri_RelationDesc;
	bool		partition_constraint_failed;
	TM_Result	result;

	updateCxt->crossPartUpdate = false;

	/*
	 * If we move the tuple to a new partition, we loop back here to recompute
	 * GENERATED values (which are allowed to be different across partitions)
	 * and recheck any RLS policies and constraints.  We do not fire any
	 * BEFORE triggers of the new partition, however.
	 */
lreplace:
	/* Fill in GENERATEd columns */
	ExecUpdatePrepareSlot(resultRelInfo, slot, estate);

	/* ensure slot is independent, consider e.g. EPQ */
	ExecMaterializeSlot(slot);

	/*
	 * If partition constraint fails, this row might get moved to another
	 * partition, in which case we should check the RLS CHECK policy just
	 * before inserting into the new partition, rather than doing it here.
	 * This is because a trigger on that partition might again change the row.
	 * So skip the WCO checks if the partition constraint fails.
	 */
	partition_constraint_failed =
		resultRelationDesc->rd_rel->relispartition &&
		!ExecPartitionCheck(resultRelInfo, slot, estate, false);

	/* Check any RLS UPDATE WITH CHECK policies */
	if (!partition_constraint_failed &&
		resultRelInfo->ri_WithCheckOptions != NIL)
	{
		/*
		 * ExecWithCheckOptions() will skip any WCOs which are not of the kind
		 * we are looking for at this point.
		 */
		ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
							 resultRelInfo, slot, estate);
	}

	/*
	 * If a partition check failed, try to move the row into the right
	 * partition.
	 */
	if (partition_constraint_failed)
	{
		TupleTableSlot *inserted_tuple,
				   *retry_slot;
		ResultRelInfo *insert_destrel = NULL;

		/*
		 * ExecCrossPartitionUpdate will first DELETE the row from the
		 * partition it's currently in and then insert it back into the root
		 * table, which will re-route it to the correct partition.  However,
		 * if the tuple has been concurrently updated, a retry is needed.
		 */
		if (ExecCrossPartitionUpdate(context, resultRelInfo,
									 tupleid, oldtuple, slot,
									 canSetTag, updateCxt,
									 &result,
									 &retry_slot,
									 &inserted_tuple,
									 &insert_destrel))
		{
			/* success! */
			updateCxt->crossPartUpdate = true;

			/*
			 * If the partitioned table being updated is referenced in foreign
			 * keys, queue up trigger events to check that none of them were
			 * violated.  No special treatment is needed in
			 * non-cross-partition update situations, because the leaf
			 * partition's AR update triggers will take care of that.  During
			 * cross-partition updates implemented as delete on the source
			 * partition followed by insert on the destination partition,
			 * AR-UPDATE triggers of the root table (that is, the table
			 * mentioned in the query) must be fired.
			 *
			 * NULL insert_destrel means that the move failed to occur, that
			 * is, the update failed, so no need to anything in that case.
			 */
			if (insert_destrel &&
				resultRelInfo->ri_TrigDesc &&
				resultRelInfo->ri_TrigDesc->trig_update_after_row)
				ExecCrossPartitionUpdateForeignKey(context,
												   resultRelInfo,
												   insert_destrel,
												   tupleid, slot,
												   inserted_tuple);

			return TM_Ok;
		}

		/*
		 * No luck, a retry is needed.  If running MERGE, we do not do so
		 * here; instead let it handle that on its own rules.
		 */
		if (context->mtstate->operation == CMD_MERGE)
			return result;

		/*
		 * ExecCrossPartitionUpdate installed an updated version of the new
		 * tuple in the retry slot; start over.
		 */
		slot = retry_slot;
		goto lreplace;
	}

	/*
	 * Check the constraints of the tuple.  We've already checked the
	 * partition constraint above; however, we must still ensure the tuple
	 * passes all other constraints, so we will call ExecConstraints() and
	 * have it validate all remaining checks.
	 */
	if (resultRelationDesc->rd_att->constr)
		ExecConstraints(resultRelInfo, slot, estate);

	/*
	 * replace the heap tuple
	 *
	 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
	 * the row to be updated is visible to that snapshot, and throw a
	 * can't-serialize error if not.  This is a special-case behavior needed
	 * for referential integrity updates in transaction-snapshot mode
	 * transactions.
	 */
	result = table_tuple_update(resultRelationDesc, tupleid, slot,
								estate->es_output_cid,
								estate->es_snapshot,
								estate->es_crosscheck_snapshot,
								true /* wait for commit */ ,
								&context->tmfd, &updateCxt->lockmode,
								&updateCxt->updateIndexes);

	return result;
}
2342 :
2343 : /*
2344 : * ExecUpdateEpilogue -- subroutine for ExecUpdate
2345 : *
2346 : * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2347 : * returns indicating that the tuple was updated.
2348 : */
2349 : static void
2350 172846 : ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
2351 : ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2352 : HeapTuple oldtuple, TupleTableSlot *slot)
2353 : {
2354 172846 : ModifyTableState *mtstate = context->mtstate;
2355 172846 : List *recheckIndexes = NIL;
2356 :
2357 : /* insert index entries for tuple if necessary */
2358 172846 : if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
2359 : {
2360 94967 : bits32 flags = EIIT_IS_UPDATE;
2361 :
2362 94967 : if (updateCxt->updateIndexes == TU_Summarizing)
2363 2188 : flags |= EIIT_ONLY_SUMMARIZING;
2364 94967 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo, context->estate,
2365 : flags, slot, NIL,
2366 : NULL);
2367 : }
2368 :
2369 : /* AFTER ROW UPDATE Triggers */
2370 172785 : ExecARUpdateTriggers(context->estate, resultRelInfo,
2371 : NULL, NULL,
2372 : tupleid, oldtuple, slot,
2373 : recheckIndexes,
2374 172785 : mtstate->operation == CMD_INSERT ?
2375 : mtstate->mt_oc_transition_capture :
2376 : mtstate->mt_transition_capture,
2377 : false);
2378 :
2379 172783 : list_free(recheckIndexes);
2380 :
2381 : /*
2382 : * Check any WITH CHECK OPTION constraints from parent views. We are
2383 : * required to do this after testing all constraints and uniqueness
2384 : * violations per the SQL spec, so we do it after actually updating the
2385 : * record in the heap and all indexes.
2386 : *
2387 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2388 : * are looking for at this point.
2389 : */
2390 172783 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2391 337 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2392 : slot, context->estate);
2393 172729 : }
2394 :
2395 : /*
2396 : * Queues up an update event using the target root partitioned table's
2397 : * trigger to check that a cross-partition update hasn't broken any foreign
2398 : * keys pointing into it.
2399 : */
2400 : static void
2401 202 : ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
2402 : ResultRelInfo *sourcePartInfo,
2403 : ResultRelInfo *destPartInfo,
2404 : ItemPointer tupleid,
2405 : TupleTableSlot *oldslot,
2406 : TupleTableSlot *newslot)
2407 : {
2408 : ListCell *lc;
2409 : ResultRelInfo *rootRelInfo;
2410 : List *ancestorRels;
2411 :
2412 202 : rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2413 202 : ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2414 :
2415 : /*
2416 : * For any foreign keys that point directly into a non-root ancestors of
2417 : * the source partition, we can in theory fire an update event to enforce
2418 : * those constraints using their triggers, if we could tell that both the
2419 : * source and the destination partitions are under the same ancestor. But
2420 : * for now, we simply report an error that those cannot be enforced.
2421 : */
2422 440 : foreach(lc, ancestorRels)
2423 : {
2424 242 : ResultRelInfo *rInfo = lfirst(lc);
2425 242 : TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2426 242 : bool has_noncloned_fkey = false;
2427 :
2428 : /* Root ancestor's triggers will be processed. */
2429 242 : if (rInfo == rootRelInfo)
2430 198 : continue;
2431 :
2432 44 : if (trigdesc && trigdesc->trig_update_after_row)
2433 : {
2434 152 : for (int i = 0; i < trigdesc->numtriggers; i++)
2435 : {
2436 112 : Trigger *trig = &trigdesc->triggers[i];
2437 :
2438 116 : if (!trig->tgisclone &&
2439 4 : RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
2440 : {
2441 4 : has_noncloned_fkey = true;
2442 4 : break;
2443 : }
2444 : }
2445 : }
2446 :
2447 44 : if (has_noncloned_fkey)
2448 4 : ereport(ERROR,
2449 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2450 : errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2451 : errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2452 : RelationGetRelationName(rInfo->ri_RelationDesc),
2453 : RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
2454 : errhint("Consider defining the foreign key on table \"%s\".",
2455 : RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
2456 : }
2457 :
2458 : /* Perform the root table's triggers. */
2459 198 : ExecARUpdateTriggers(context->estate,
2460 : rootRelInfo, sourcePartInfo, destPartInfo,
2461 : tupleid, NULL, newslot, NIL, NULL, true);
2462 198 : }
2463 :
2464 : /* ----------------------------------------------------------------
2465 : * ExecUpdate
2466 : *
2467 : * note: we can't run UPDATE queries with transactions
2468 : * off because UPDATEs are actually INSERTs and our
2469 : * scan will mistakenly loop forever, updating the tuple
2470 : * it just inserted.. This should be fixed but until it
2471 : * is, we don't want to get stuck in an infinite loop
2472 : * which corrupts your database..
2473 : *
2474 : * When updating a table, tupleid identifies the tuple to update and
2475 : * oldtuple is NULL. When updating through a view INSTEAD OF trigger,
2476 : * oldtuple is passed to the triggers and identifies what to update, and
2477 : * tupleid is invalid. When updating a foreign table, tupleid is
2478 : * invalid; the FDW has to figure out which row to update using data from
2479 : * the planSlot. oldtuple is passed to foreign table triggers; it is
2480 : * NULL when the foreign table has no relevant triggers.
2481 : *
2482 : * oldSlot contains the old tuple value.
2483 : * slot contains the new tuple value to be stored.
2484 : * planSlot is the output of the ModifyTable's subplan; we use it
2485 : * to access values from other input tables (for RETURNING),
2486 : * row-ID junk columns, etc.
2487 : *
2488 : * Returns RETURNING result if any, otherwise NULL. On exit, if tupleid
2489 : * had identified the tuple to update, it will identify the tuple
2490 : * actually updated after EvalPlanQual.
2491 : * ----------------------------------------------------------------
2492 : */
2493 : static TupleTableSlot *
2494 172505 : ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2495 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *oldSlot,
2496 : TupleTableSlot *slot, bool canSetTag)
2497 : {
2498 172505 : EState *estate = context->estate;
2499 172505 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2500 172505 : UpdateContext updateCxt = {0};
2501 : TM_Result result;
2502 :
2503 : /*
2504 : * abort the operation if not running transactions
2505 : */
2506 172505 : if (IsBootstrapProcessingMode())
2507 0 : elog(ERROR, "cannot UPDATE during bootstrap");
2508 :
2509 : /*
2510 : * Prepare for the update. This includes BEFORE ROW triggers, so we're
2511 : * done if it says we are.
2512 : */
2513 172505 : if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
2514 85 : return NULL;
2515 :
2516 : /* INSTEAD OF ROW UPDATE Triggers */
2517 172408 : if (resultRelInfo->ri_TrigDesc &&
2518 3607 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2519 : {
2520 83 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2521 : oldtuple, slot))
2522 12 : return NULL; /* "do nothing" */
2523 : }
2524 172325 : else if (resultRelInfo->ri_FdwRoutine)
2525 : {
2526 : /* Fill in GENERATEd columns */
2527 95 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2528 :
2529 : /*
2530 : * update in foreign table: let the FDW do it
2531 : */
2532 95 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2533 : resultRelInfo,
2534 : slot,
2535 : context->planSlot);
2536 :
2537 95 : if (slot == NULL) /* "do nothing" */
2538 1 : return NULL;
2539 :
2540 : /*
2541 : * AFTER ROW Triggers or RETURNING expressions might reference the
2542 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
2543 : * them. (This covers the case where the FDW replaced the slot.)
2544 : */
2545 94 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2546 : }
2547 : else
2548 : {
2549 : ItemPointerData lockedtid;
2550 :
2551 : /*
2552 : * If we generate a new candidate tuple after EvalPlanQual testing, we
2553 : * must loop back here to try again. (We don't need to redo triggers,
2554 : * however. If there are any BEFORE triggers then trigger.c will have
2555 : * done table_tuple_lock to lock the correct tuple, so there's no need
2556 : * to do them again.)
2557 : */
2558 172230 : redo_act:
2559 172283 : lockedtid = *tupleid;
2560 172283 : result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2561 : canSetTag, &updateCxt);
2562 :
2563 : /*
2564 : * If ExecUpdateAct reports that a cross-partition update was done,
2565 : * then the RETURNING tuple (if any) has been projected and there's
2566 : * nothing else for us to do.
2567 : */
2568 172080 : if (updateCxt.crossPartUpdate)
2569 580 : return context->cpUpdateReturningSlot;
2570 :
2571 171579 : switch (result)
2572 : {
2573 56 : case TM_SelfModified:
2574 :
2575 : /*
2576 : * The target tuple was already updated or deleted by the
2577 : * current command, or by a later command in the current
2578 : * transaction. The former case is possible in a join UPDATE
2579 : * where multiple tuples join to the same target tuple. This
2580 : * is pretty questionable, but Postgres has always allowed it:
2581 : * we just execute the first update action and ignore
2582 : * additional update attempts.
2583 : *
2584 : * The latter case arises if the tuple is modified by a
2585 : * command in a BEFORE trigger, or perhaps by a command in a
2586 : * volatile function used in the query. In such situations we
2587 : * should not ignore the update, but it is equally unsafe to
2588 : * proceed. We don't want to discard the original UPDATE
2589 : * while keeping the triggered actions based on it; and we
2590 : * have no principled way to merge this update with the
2591 : * previous ones. So throwing an error is the only safe
2592 : * course.
2593 : *
2594 : * If a trigger actually intends this type of interaction, it
2595 : * can re-execute the UPDATE (assuming it can figure out how)
2596 : * and then return NULL to cancel the outer update.
2597 : */
2598 56 : if (context->tmfd.cmax != estate->es_output_cid)
2599 4 : ereport(ERROR,
2600 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2601 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2602 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2603 :
2604 : /* Else, already updated by self; nothing to do */
2605 52 : return NULL;
2606 :
2607 171438 : case TM_Ok:
2608 171438 : break;
2609 :
2610 81 : case TM_Updated:
2611 : {
2612 : TupleTableSlot *inputslot;
2613 : TupleTableSlot *epqslot;
2614 :
2615 81 : if (IsolationUsesXactSnapshot())
2616 2 : ereport(ERROR,
2617 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2618 : errmsg("could not serialize access due to concurrent update")));
2619 :
2620 : /*
2621 : * Already know that we're going to need to do EPQ, so
2622 : * fetch tuple directly into the right slot.
2623 : */
2624 79 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2625 : resultRelInfo->ri_RangeTableIndex);
2626 :
2627 79 : result = table_tuple_lock(resultRelationDesc, tupleid,
2628 : estate->es_snapshot,
2629 : inputslot, estate->es_output_cid,
2630 : updateCxt.lockmode, LockWaitBlock,
2631 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2632 : &context->tmfd);
2633 :
2634 77 : switch (result)
2635 : {
2636 72 : case TM_Ok:
2637 : Assert(context->tmfd.traversed);
2638 :
2639 72 : epqslot = EvalPlanQual(context->epqstate,
2640 : resultRelationDesc,
2641 : resultRelInfo->ri_RangeTableIndex,
2642 : inputslot);
2643 72 : if (TupIsNull(epqslot))
2644 : /* Tuple not passing quals anymore, exiting... */
2645 19 : return NULL;
2646 :
2647 : /* Make sure ri_oldTupleSlot is initialized. */
2648 53 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2649 0 : ExecInitUpdateProjection(context->mtstate,
2650 : resultRelInfo);
2651 :
2652 53 : if (resultRelInfo->ri_needLockTagTuple)
2653 : {
2654 1 : UnlockTuple(resultRelationDesc,
2655 : &lockedtid, InplaceUpdateTupleLock);
2656 1 : LockTuple(resultRelationDesc,
2657 : tupleid, InplaceUpdateTupleLock);
2658 : }
2659 :
2660 : /* Fetch the most recent version of old tuple. */
2661 53 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2662 53 : if (!table_tuple_fetch_row_version(resultRelationDesc,
2663 : tupleid,
2664 : SnapshotAny,
2665 : oldSlot))
2666 0 : elog(ERROR, "failed to fetch tuple being updated");
2667 53 : slot = ExecGetUpdateNewTuple(resultRelInfo,
2668 : epqslot, oldSlot);
2669 53 : goto redo_act;
2670 :
2671 1 : case TM_Deleted:
2672 : /* tuple already deleted; nothing to do */
2673 1 : return NULL;
2674 :
2675 4 : case TM_SelfModified:
2676 :
2677 : /*
2678 : * This can be reached when following an update
2679 : * chain from a tuple updated by another session,
2680 : * reaching a tuple that was already updated in
2681 : * this transaction. If previously modified by
2682 : * this command, ignore the redundant update,
2683 : * otherwise error out.
2684 : *
2685 : * See also TM_SelfModified response to
2686 : * table_tuple_update() above.
2687 : */
2688 4 : if (context->tmfd.cmax != estate->es_output_cid)
2689 1 : ereport(ERROR,
2690 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2691 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2692 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2693 3 : return NULL;
2694 :
2695 0 : default:
2696 : /* see table_tuple_lock call in ExecDelete() */
2697 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2698 : result);
2699 : return NULL;
2700 : }
2701 : }
2702 :
2703 : break;
2704 :
2705 4 : case TM_Deleted:
2706 4 : if (IsolationUsesXactSnapshot())
2707 0 : ereport(ERROR,
2708 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2709 : errmsg("could not serialize access due to concurrent delete")));
2710 : /* tuple already deleted; nothing to do */
2711 4 : return NULL;
2712 :
2713 0 : default:
2714 0 : elog(ERROR, "unrecognized table_tuple_update status: %u",
2715 : result);
2716 : return NULL;
2717 : }
2718 : }
2719 :
2720 171595 : if (canSetTag)
2721 171198 : (estate->es_processed)++;
2722 :
2723 171595 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2724 : slot);
2725 :
2726 : /* Process RETURNING if present */
2727 171486 : if (resultRelInfo->ri_projectReturning)
2728 1455 : return ExecProcessReturning(context, resultRelInfo, false,
2729 : oldSlot, slot, context->planSlot);
2730 :
2731 170031 : return NULL;
2732 : }
2733 :
2734 : /*
2735 : * ExecOnConflictLockRow --- lock the row for ON CONFLICT DO SELECT/UPDATE
2736 : *
2737 : * Try to lock tuple for update as part of speculative insertion for ON
2738 : * CONFLICT DO UPDATE or ON CONFLICT DO SELECT FOR UPDATE/SHARE.
2739 : *
2740 : * Returns true if the row is successfully locked, or false if the caller must
2741 : * retry the INSERT from scratch.
 : *
 : * On a true return, "existing" holds the locked tuple version; on a false
 : * return it has been cleared. "isUpdate" only selects the DO UPDATE vs.
 : * DO SELECT wording of the cardinality-violation error message below; it
 : * does not affect the locking behavior itself.
2742 : */
2743 : static bool
2744 2811 : ExecOnConflictLockRow(ModifyTableContext *context,
2745 : TupleTableSlot *existing,
2746 : ItemPointer conflictTid,
2747 : Relation relation,
2748 : LockTupleMode lockmode,
2749 : bool isUpdate)
2750 : {
2751 : TM_FailureData tmfd;
2752 : TM_Result test;
2753 : Datum xminDatum;
2754 : TransactionId xmin;
2755 : bool isnull;
2756 :
2757 : /*
2758 : * Lock tuple with lockmode. Don't follow updates when tuple cannot be
2759 : * locked without doing so. A row locking conflict here means our
2760 : * previous conclusion that the tuple is conclusively committed is not
2761 : * true anymore.
2762 : */
2763 2811 : test = table_tuple_lock(relation, conflictTid,
2764 2811 : context->estate->es_snapshot,
2765 2811 : existing, context->estate->es_output_cid,
2766 : lockmode, LockWaitBlock, 0,
2767 : &tmfd);
2768 2811 : switch (test)
2769 : {
2770 2780 : case TM_Ok:
2771 : /* success! */
2772 2780 : break;
2773 :
2774 28 : case TM_Invisible:
2775 :
2776 : /*
2777 : * This can occur when a just inserted tuple is updated again in
2778 : * the same command. E.g. because multiple rows with the same
2779 : * conflicting key values are inserted.
2780 : *
2781 : * This is somewhat similar to the ExecUpdate() TM_SelfModified
2782 : * case. We do not want to proceed because it would lead to the
2783 : * same row being updated a second time in some unspecified order,
2784 : * and in contrast to plain UPDATEs there's no historical behavior
2785 : * to break.
2786 : *
2787 : * It is the user's responsibility to prevent this situation from
2788 : * occurring. These problems are why the SQL standard similarly
2789 : * specifies that for SQL MERGE, an exception must be raised in
2790 : * the event of an attempt to update the same row twice.
2791 : */
2792 28 : xminDatum = slot_getsysattr(existing,
2793 : MinTransactionIdAttributeNumber,
2794 : &isnull);
2795 : Assert(!isnull);
2796 28 : xmin = DatumGetTransactionId(xminDatum);
2797 :
 : /* Invisible because we inserted it earlier in this xact? */
2798 28 : if (TransactionIdIsCurrentTransactionId(xmin))
2799 28 : ereport(ERROR,
2800 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2801 : /* translator: %s is a SQL command name */
2802 : errmsg("%s command cannot affect row a second time",
2803 : isUpdate ? "ON CONFLICT DO UPDATE" : "ON CONFLICT DO SELECT"),
2804 : errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
2805 :
2806 : /* This shouldn't happen */
2807 0 : elog(ERROR, "attempted to lock invisible tuple");
2808 : break;
2809 :
2810 0 : case TM_SelfModified:
2811 :
2812 : /*
2813 : * This state should never be reached. As a dirty snapshot is used
2814 : * to find conflicting tuples, speculative insertion wouldn't have
2815 : * seen this row to conflict with.
2816 : */
2817 0 : elog(ERROR, "unexpected self-updated tuple");
2818 : break;
2819 :
2820 2 : case TM_Updated:
2821 2 : if (IsolationUsesXactSnapshot())
2822 0 : ereport(ERROR,
2823 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2824 : errmsg("could not serialize access due to concurrent update")));
2825 :
2826 : /*
2827 : * Tell caller to try again from the very start.
2828 : *
2829 : * It does not make sense to use the usual EvalPlanQual() style
2830 : * loop here, as the new version of the row might not conflict
2831 : * anymore, or the conflicting tuple has actually been deleted.
2832 : */
2833 2 : ExecClearTuple(existing);
2834 2 : return false;
2835 :
2836 1 : case TM_Deleted:
2837 1 : if (IsolationUsesXactSnapshot())
2838 0 : ereport(ERROR,
2839 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2840 : errmsg("could not serialize access due to concurrent delete")));
2841 :
2842 : /* see TM_Updated case */
2843 1 : ExecClearTuple(existing);
2844 1 : return false;
2845 :
2846 0 : default:
2847 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
2848 : }
2849 :
2850 : /* Success, the tuple is locked. */
2851 2780 : return true;
2852 : }
2853 :
2854 : /*
2855 : * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
2856 : *
2857 : * Try to lock tuple for update as part of speculative insertion. If
2858 : * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
2859 : * (but still lock row, even though it may not satisfy estate's
2860 : * snapshot).
2861 : *
2862 : * Returns true if we're done (with or without an update), or false if
2863 : * the caller must retry the INSERT from scratch.
 : *
 : * On a true return, *returning is set to the UPDATE's RETURNING result
 : * slot, or NULL if no row is to be returned (e.g. because the ON
 : * CONFLICT ... WHERE clause rejected the row, or the inner ExecUpdate()
 : * returned no slot).
2864 : */
2865 : static bool
2866 2741 : ExecOnConflictUpdate(ModifyTableContext *context,
2867 : ResultRelInfo *resultRelInfo,
2868 : ItemPointer conflictTid,
2869 : TupleTableSlot *excludedSlot,
2870 : bool canSetTag,
2871 : TupleTableSlot **returning)
2872 : {
2873 2741 : ModifyTableState *mtstate = context->mtstate;
2874 2741 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2875 2741 : Relation relation = resultRelInfo->ri_RelationDesc;
2876 2741 : ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
2877 2741 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
2878 : LockTupleMode lockmode;
2879 :
2880 : /*
2881 : * Parse analysis should have blocked ON CONFLICT for all system
2882 : * relations, which includes these. There's no fundamental obstacle to
2883 : * supporting this; we'd just need to handle LOCKTAG_TUPLE like the other
2884 : * ExecUpdate() caller.
2885 : */
2886 : Assert(!resultRelInfo->ri_needLockTagTuple);
2887 :
2888 : /* Determine lock mode to use */
2889 2741 : lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
2890 :
2891 : /* Lock tuple for update */
2892 2741 : if (!ExecOnConflictLockRow(context, existing, conflictTid,
2893 : resultRelInfo->ri_RelationDesc, lockmode, true))
2894 3 : return false;
2895 :
2896 : /*
2897 : * Verify that the tuple is visible to our MVCC snapshot if the current
2898 : * isolation level mandates that.
2899 : *
2900 : * It's not sufficient to rely on the check within ExecUpdate() as e.g.
2901 : * CONFLICT ... WHERE clause may prevent us from reaching that.
2902 : *
2903 : * This means we only ever continue when a new command in the current
2904 : * transaction could see the row, even though in READ COMMITTED mode the
2905 : * tuple will not be visible according to the current statement's
2906 : * snapshot. This is in line with the way UPDATE deals with newer tuple
2907 : * versions.
2908 : */
2909 2722 : ExecCheckTupleVisible(context->estate, relation, existing);
2910 :
2911 : /*
2912 : * Make tuple and any needed join variables available to ExecQual and
2913 : * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
2914 : * the target's existing tuple is installed in the scantuple. EXCLUDED
2915 : * has been made to reference INNER_VAR in setrefs.c, but there is no
2916 : * other redirection.
2917 : */
2918 2722 : econtext->ecxt_scantuple = existing;
2919 2722 : econtext->ecxt_innertuple = excludedSlot;
2920 2722 : econtext->ecxt_outertuple = NULL;
2921 :
2922 2722 : if (!ExecQual(onConflictSetWhere, econtext))
2923 : {
2924 21 : ExecClearTuple(existing); /* see return below */
2925 21 : InstrCountFiltered1(&mtstate->ps, 1);
2926 21 : return true; /* done with the tuple */
2927 : }
2928 :
2929 2701 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2930 : {
2931 : /*
2932 : * Check target's existing tuple against UPDATE-applicable USING
2933 : * security barrier quals (if any), enforced here as RLS checks/WCOs.
2934 : *
2935 : * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
2936 : * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK.
2937 : * Since SELECT permission on the target table is always required for
2938 : * INSERT ... ON CONFLICT DO UPDATE, the rewriter also adds SELECT RLS
2939 : * checks/WCOs for SELECT security quals, using WCOs of the same kind,
2940 : * and this check enforces them too.
2941 : *
2942 : * The rewriter will also have associated UPDATE-applicable straight
2943 : * RLS checks/WCOs for the benefit of the ExecUpdate() call that
2944 : * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
2945 : * kinds, so there is no danger of spurious over-enforcement in the
2946 : * INSERT or UPDATE path.
2947 : */
2948 48 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
2949 : existing,
2950 : mtstate->ps.state);
2951 : }
2952 :
2953 : /* Project the new tuple version */
2954 2685 : ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
2955 :
2956 : /*
2957 : * Note that it is possible that the target tuple has been modified in
2958 : * this session, after the above table_tuple_lock. We choose to not error
2959 : * out in that case, in line with ExecUpdate's treatment of similar cases.
2960 : * This can happen if an UPDATE is triggered from within ExecQual(),
2961 : * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
2962 : * wCTE in the ON CONFLICT's SET.
2963 : */
2964 :
2965 : /* Execute UPDATE with projection */
2966 5350 : *returning = ExecUpdate(context, resultRelInfo,
2967 : conflictTid, NULL, existing,
2968 2685 : resultRelInfo->ri_onConflict->oc_ProjSlot,
2969 : canSetTag);
2970 :
2971 : /*
2972 : * Clear out existing tuple, as there might not be another conflict among
2973 : * the next input rows. Don't want to hold resources till the end of the
2974 : * query. First though, make sure that the returning slot, if any, has a
2975 : * local copy of any OLD pass-by-reference values, if it refers to any OLD
2976 : * columns.
2977 : */
2978 2665 : if (*returning != NULL &&
2979 154 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
2980 8 : ExecMaterializeSlot(*returning);
2981 :
2982 2665 : ExecClearTuple(existing);
2983 :
2984 2665 : return true;
2985 : }
2986 :
2987 : /*
2988 : * ExecOnConflictSelect --- execute SELECT of INSERT ON CONFLICT DO SELECT
2989 : *
2990 : * If SELECT FOR UPDATE/SHARE is specified, try to lock tuple as part of
2991 : * speculative insertion. If a qual originating from ON CONFLICT DO SELECT is
2992 : * satisfied, select (but still lock row, even though it may not satisfy
2993 : * estate's snapshot).
2994 : *
2995 : * Returns true if we're done (with or without a select), or false if the
2996 : * caller must retry the INSERT from scratch.
 : *
 : * On a true return, *returning has been set to the RETURNING result slot,
 : * unless the ON CONFLICT ... WHERE clause rejected the row, in which case
 : * it is left untouched.
2997 : */
2998 : static bool
2999 192 : ExecOnConflictSelect(ModifyTableContext *context,
3000 : ResultRelInfo *resultRelInfo,
3001 : ItemPointer conflictTid,
3002 : TupleTableSlot *excludedSlot,
3003 : bool canSetTag,
3004 : TupleTableSlot **returning)
3005 : {
3006 192 : ModifyTableState *mtstate = context->mtstate;
3007 192 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3008 192 : Relation relation = resultRelInfo->ri_RelationDesc;
3009 192 : ExprState *onConflictSelectWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
3010 192 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
3011 192 : LockClauseStrength lockStrength = resultRelInfo->ri_onConflict->oc_LockStrength;
3012 :
3013 : /*
3014 : * Parse analysis should have blocked ON CONFLICT for all system
3015 : * relations, which includes these. There's no fundamental obstacle to
3016 : * supporting this; we'd just need to handle LOCKTAG_TUPLE appropriately.
3017 : */
3018 : Assert(!resultRelInfo->ri_needLockTagTuple);
3019 :
3020 : /* Fetch/lock existing tuple, according to the requested lock strength */
3021 192 : if (lockStrength == LCS_NONE)
3022 : {
3023 122 : if (!table_tuple_fetch_row_version(relation,
3024 : conflictTid,
3025 : SnapshotAny,
3026 : existing))
3027 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
3028 : }
3029 : else
3030 : {
3031 : LockTupleMode lockmode;
3032 :
3033 70 : switch (lockStrength)
3034 : {
3035 1 : case LCS_FORKEYSHARE:
3036 1 : lockmode = LockTupleKeyShare;
3037 1 : break;
3038 1 : case LCS_FORSHARE:
3039 1 : lockmode = LockTupleShare;
3040 1 : break;
3041 1 : case LCS_FORNOKEYUPDATE:
3042 1 : lockmode = LockTupleNoKeyExclusive;
3043 1 : break;
3044 67 : case LCS_FORUPDATE:
3045 67 : lockmode = LockTupleExclusive;
3046 67 : break;
3047 0 : default:
3048 0 : elog(ERROR, "unrecognized lock strength: %d", (int) lockStrength);
3049 : }
3050 :
3051 70 : if (!ExecOnConflictLockRow(context, existing, conflictTid,
3052 : resultRelInfo->ri_RelationDesc, lockmode, false))
3053 0 : return false;
3054 : }
3055 :
3056 : /*
3057 : * Verify that the tuple is visible to our MVCC snapshot if the current
3058 : * isolation level mandates that. See comments in ExecOnConflictUpdate().
3059 : */
3060 180 : ExecCheckTupleVisible(context->estate, relation, existing);
3061 :
3062 : /*
3063 : * Make tuple and any needed join variables available to ExecQual. The
3064 : * EXCLUDED tuple is installed in ecxt_innertuple, while the target's
3065 : * existing tuple is installed in the scantuple. EXCLUDED has been made
3066 : * to reference INNER_VAR in setrefs.c, but there is no other redirection.
3067 : */
3068 180 : econtext->ecxt_scantuple = existing;
3069 180 : econtext->ecxt_innertuple = excludedSlot;
3070 180 : econtext->ecxt_outertuple = NULL;
3071 :
3072 180 : if (!ExecQual(onConflictSelectWhere, econtext))
3073 : {
3074 24 : ExecClearTuple(existing); /* see return below */
3075 24 : InstrCountFiltered1(&mtstate->ps, 1);
3076 24 : return true; /* done with the tuple */
3077 : }
3078 :
3079 156 : if (resultRelInfo->ri_WithCheckOptions != NIL)
3080 : {
3081 : /*
3082 : * Check target's existing tuple against SELECT-applicable USING
3083 : * security barrier quals (if any), enforced here as RLS checks/WCOs.
3084 : *
3085 : * The rewriter creates WCOs from the USING quals of SELECT policies,
3086 : * and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK. If FOR
3087 : * UPDATE/SHARE was specified, UPDATE permissions are required on the
3088 : * target table, and the rewriter also adds WCOs built from the USING
3089 : * quals of UPDATE policies, using WCOs of the same kind, and this
3090 : * check enforces them too.
3091 : */
3092 24 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
3093 : existing,
3094 : mtstate->ps.state);
3095 : }
3096 :
3097 : /* RETURNING is required for DO SELECT */
3098 : Assert(resultRelInfo->ri_projectReturning);
3099 :
3100 152 : *returning = ExecProcessReturning(context, resultRelInfo, false,
3101 : existing, existing, context->planSlot);
3102 :
3103 152 : if (canSetTag)
3104 152 : context->estate->es_processed++;
3105 :
3106 : /*
3107 : * Before releasing the existing tuple, make sure that the returning slot
3108 : * has a local copy of any pass-by-reference values.
3109 : */
3110 152 : ExecMaterializeSlot(*returning);
3111 :
3112 : /*
3113 : * Clear out existing tuple, as there might not be another conflict among
3114 : * the next input rows. Don't want to hold resources till the end of the
3115 : * query.
3116 : */
3117 152 : ExecClearTuple(existing);
3118 :
3119 152 : return true;
3120 : }
3121 :
3122 : /*
3123 : * Perform MERGE.
 : *
 : * Dispatch point for one source row of a MERGE. tupleid or oldtuple
 : * identifies a matched target row (table vs. view respectively); if both
 : * are NULL the row is NOT MATCHED [BY TARGET]. Returns the RETURNING
 : * result slot from the action executed, or NULL if there is none.
3124 : */
3125 : static TupleTableSlot *
3126 10302 : ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3127 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
3128 : {
3129 10302 : TupleTableSlot *rslot = NULL;
3130 : bool matched;
3131 :
3132 : /*-----
3133 : * If we are dealing with a WHEN MATCHED case, tupleid or oldtuple is
3134 : * valid, depending on whether the result relation is a table or a view.
3135 : * We execute the first action for which the additional WHEN MATCHED AND
3136 : * quals pass. If an action without quals is found, that action is
3137 : * executed.
3138 : *
3139 : * Similarly, in the WHEN NOT MATCHED BY SOURCE case, tupleid or oldtuple
3140 : * is valid, and we look at the given WHEN NOT MATCHED BY SOURCE actions
3141 : * in sequence until one passes. This is almost identical to the WHEN
3142 : * MATCHED case, and both cases are handled by ExecMergeMatched().
3143 : *
3144 : * Finally, in the WHEN NOT MATCHED [BY TARGET] case, both tupleid and
3145 : * oldtuple are invalid, and we look at the given WHEN NOT MATCHED [BY
3146 : * TARGET] actions in sequence until one passes.
3147 : *
3148 : * Things get interesting in case of concurrent update/delete of the
3149 : * target tuple. Such concurrent update/delete is detected while we are
3150 : * executing a WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action.
3151 : *
3152 : * A concurrent update can:
3153 : *
3154 : * 1. modify the target tuple so that the results from checking any
3155 : * additional quals attached to WHEN MATCHED or WHEN NOT MATCHED BY
3156 : * SOURCE actions potentially change, but the result from the join
3157 : * quals does not change.
3158 : *
3159 : * In this case, we are still dealing with the same kind of match
3160 : * (MATCHED or NOT MATCHED BY SOURCE). We recheck the same list of
3161 : * actions from the start and choose the first one that satisfies the
3162 : * new target tuple.
3163 : *
3164 : * 2. modify the target tuple in the WHEN MATCHED case so that the join
3165 : * quals no longer pass and hence the source and target tuples no
3166 : * longer match.
3167 : *
3168 : * In this case, we are now dealing with a NOT MATCHED case, and we
3169 : * process both WHEN NOT MATCHED BY SOURCE and WHEN NOT MATCHED [BY
3170 : * TARGET] actions. First ExecMergeMatched() processes the list of
3171 : * WHEN NOT MATCHED BY SOURCE actions in sequence until one passes,
3172 : * then ExecMergeNotMatched() processes any WHEN NOT MATCHED [BY
3173 : * TARGET] actions in sequence until one passes. Thus we may execute
3174 : * two actions; one of each kind.
3175 : *
3176 : * Thus we support concurrent updates that turn MATCHED candidate rows
3177 : * into NOT MATCHED rows. However, we do not attempt to support cases
3178 : * that would turn NOT MATCHED rows into MATCHED rows, or which would
3179 : * cause a target row to match a different source row.
3180 : *
3181 : * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED
3182 : * [BY TARGET].
3183 : *
3184 : * ExecMergeMatched() takes care of following the update chain and
3185 : * re-finding the qualifying WHEN MATCHED or WHEN NOT MATCHED BY SOURCE
3186 : * action, as long as the target tuple still exists. If the target tuple
3187 : * gets deleted or a concurrent update causes the join quals to fail, it
3188 : * returns a matched status of false and we call ExecMergeNotMatched().
3189 : * Given that ExecMergeMatched() always makes progress by following the
3190 : * update chain and we never switch from ExecMergeNotMatched() to
3191 : * ExecMergeMatched(), there is no risk of a livelock.
3192 : */
 : /* Row-locating info present => MATCHED or NOT MATCHED BY SOURCE */
3193 10302 : matched = tupleid != NULL || oldtuple != NULL;
3194 10302 : if (matched)
3195 8515 : rslot = ExecMergeMatched(context, resultRelInfo, tupleid, oldtuple,
3196 : canSetTag, &matched);
3197 :
3198 : /*
3199 : * Deal with the NOT MATCHED case (either a NOT MATCHED tuple from the
3200 : * join, or a previously MATCHED tuple for which ExecMergeMatched() set
3201 : * "matched" to false, indicating that it no longer matches).
3202 : */
3203 10240 : if (!matched)
3204 : {
3205 : /*
3206 : * If a concurrent update turned a MATCHED case into a NOT MATCHED
3207 : * case, and we have both WHEN NOT MATCHED BY SOURCE and WHEN NOT
3208 : * MATCHED [BY TARGET] actions, and there is a RETURNING clause,
3209 : * ExecMergeMatched() may have already executed a WHEN NOT MATCHED BY
3210 : * SOURCE action, and computed the row to return. If so, we cannot
3211 : * execute a WHEN NOT MATCHED [BY TARGET] action now, so mark it as
3212 : * pending (to be processed on the next call to ExecModifyTable()).
3213 : * Otherwise, just process the action now.
3214 : */
3215 1796 : if (rslot == NULL)
3216 1794 : rslot = ExecMergeNotMatched(context, resultRelInfo, canSetTag);
3217 : else
3218 2 : context->mtstate->mt_merge_pending_not_matched = context->planSlot;
3219 : }
3220 :
3221 10201 : return rslot;
3222 : }
3223 :
3224 : /*
3225 : * Check and execute the first qualifying MATCHED or NOT MATCHED BY SOURCE
3226 : * action, depending on whether the join quals are satisfied. If the target
3227 : * relation is a table, the current target tuple is identified by tupleid.
3228 : * Otherwise, if the target relation is a view, oldtuple is the current target
3229 : * tuple from the view.
3230 : *
3231 : * We start from the first WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action
3232 : * and check if the WHEN quals pass, if any. If the WHEN quals for the first
3233 : * action do not pass, we check the second, then the third and so on. If we
3234 : * reach the end without finding a qualifying action, we return NULL.
3235 : * Otherwise, we execute the qualifying action and return its RETURNING
3236 : * result, if any, or NULL.
3237 : *
3238 : * On entry, "*matched" is assumed to be true. If a concurrent update or
3239 : * delete is detected that causes the join quals to no longer pass, we set it
3240 : * to false, indicating that the caller should process any NOT MATCHED [BY
3241 : * TARGET] actions.
3242 : *
3243 : * After a concurrent update, we restart from the first action to look for a
3244 : * new qualifying action to execute. If the join quals originally passed, and
3245 : * the concurrent update caused them to no longer pass, then we switch from
3246 : * the MATCHED to the NOT MATCHED BY SOURCE list of actions before restarting
3247 : * (and setting "*matched" to false). As a result we may execute a WHEN NOT
3248 : * MATCHED BY SOURCE action, and set "*matched" to false, causing the caller
3249 : * to also execute a WHEN NOT MATCHED [BY TARGET] action.
3250 : */
3251 : static TupleTableSlot *
3252 8515 : ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3253 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag,
3254 : bool *matched)
3255 : {
3256 8515 : ModifyTableState *mtstate = context->mtstate;
3257 8515 : List **mergeActions = resultRelInfo->ri_MergeActions;
3258 : ItemPointerData lockedtid;
3259 : List *actionStates;
3260 8515 : TupleTableSlot *newslot = NULL;
3261 8515 : TupleTableSlot *rslot = NULL;
3262 8515 : EState *estate = context->estate;
3263 8515 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3264 : bool isNull;
3265 8515 : EPQState *epqstate = &mtstate->mt_epqstate;
3266 : ListCell *l;
3267 :
3268 : /* Expect matched to be true on entry */
3269 : Assert(*matched);
3270 :
3271 : /*
3272 : * If there are no WHEN MATCHED or WHEN NOT MATCHED BY SOURCE actions, we
3273 : * are done.
3274 : */
3275 8515 : if (mergeActions[MERGE_WHEN_MATCHED] == NIL &&
3276 780 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] == NIL)
3277 332 : return NULL;
3278 :
3279 : /*
3280 : * Make tuple and any needed join variables available to ExecQual and
3281 : * ExecProject. The target's existing tuple is installed in the scantuple.
3282 : * This target relation's slot is required only in the case of a MATCHED
3283 : * or NOT MATCHED BY SOURCE tuple and UPDATE/DELETE actions.
3284 : */
3285 8183 : econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
3286 8183 : econtext->ecxt_innertuple = context->planSlot;
3287 8183 : econtext->ecxt_outertuple = NULL;
3288 :
3289 : /*
3290 : * This routine is only invoked for matched target rows, so we should
3291 : * either have the tupleid of the target row, or an old tuple from the
3292 : * target wholerow junk attr.
3293 : */
3294 : Assert(tupleid != NULL || oldtuple != NULL);
3295 8183 : ItemPointerSetInvalid(&lockedtid);
3296 8183 : if (oldtuple != NULL)
3297 : {
3298 : Assert(!resultRelInfo->ri_needLockTagTuple);
3299 64 : ExecForceStoreHeapTuple(oldtuple, resultRelInfo->ri_oldTupleSlot,
3300 : false);
3301 : }
3302 : else
3303 : {
3304 8119 : if (resultRelInfo->ri_needLockTagTuple)
3305 : {
3306 : /*
3307 : * This locks even for CMD_DELETE, for CMD_NOTHING, and for tuples
3308 : * that don't match mas_whenqual. MERGE on system catalogs is a
3309 : * minor use case, so don't bother optimizing those.
3310 : */
3311 5506 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3312 : InplaceUpdateTupleLock);
3313 5506 : lockedtid = *tupleid;
3314 : }
3315 8119 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
3316 : tupleid,
3317 : SnapshotAny,
3318 : resultRelInfo->ri_oldTupleSlot))
3319 0 : elog(ERROR, "failed to fetch the target tuple");
3320 : }
3321 :
3322 : /*
3323 : * Test the join condition. If it's satisfied, perform a MATCHED action.
3324 : * Otherwise, perform a NOT MATCHED BY SOURCE action.
3325 : *
3326 : * Note that this join condition will be NULL if there are no NOT MATCHED
3327 : * BY SOURCE actions --- see transform_MERGE_to_join(). In that case, we
3328 : * need only consider MATCHED actions here.
3329 : */
3330 8183 : if (ExecQual(resultRelInfo->ri_MergeJoinCondition, econtext))
3331 8061 : actionStates = mergeActions[MERGE_WHEN_MATCHED];
3332 : else
3333 122 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3334 :
3335 8183 : lmerge_matched:
3336 :
3337 14696 : foreach(l, actionStates)
3338 : {
3339 8266 : MergeActionState *relaction = (MergeActionState *) lfirst(l);
3340 8266 : CmdType commandType = relaction->mas_action->commandType;
3341 : TM_Result result;
3342 8266 : UpdateContext updateCxt = {0};
3343 :
3344 : /*
3345 : * Test condition, if any.
3346 : *
3347 : * In the absence of any condition, we perform the action
3348 : * unconditionally (no need to check separately since ExecQual() will
3349 : * return true if there are no conditions to evaluate).
3350 : */
3351 8266 : if (!ExecQual(relaction->mas_whenqual, econtext))
3352 6473 : continue;
3353 :
3354 : /*
3355 : * Check if the existing target tuple meets the USING checks of
3356 : * UPDATE/DELETE RLS policies. If those checks fail, we throw an
3357 : * error.
3358 : *
3359 : * The WITH CHECK quals for UPDATE RLS policies are applied in
3360 : * ExecUpdateAct() and hence we need not do anything special to handle
3361 : * them.
3362 : *
3363 : * NOTE: We must do this after WHEN quals are evaluated, so that we
3364 : * check policies only when they matter.
3365 : */
3366 1793 : if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
3367 : {
3368 76 : ExecWithCheckOptions(commandType == CMD_UPDATE ?
3369 : WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
3370 : resultRelInfo,
3371 : resultRelInfo->ri_oldTupleSlot,
3372 76 : context->mtstate->ps.state);
3373 : }
3374 :
3375 : /* Perform stated action */
3376 1777 : switch (commandType)
3377 : {
3378 1413 : case CMD_UPDATE:
3379 :
3380 : /*
3381 : * Project the output tuple, and use that to update the table.
3382 : * We don't need to filter out junk attributes, because the
3383 : * UPDATE action's targetlist doesn't have any.
3384 : */
3385 1413 : newslot = ExecProject(relaction->mas_proj);
3386 :
3387 1413 : mtstate->mt_merge_action = relaction;
3388 1413 : if (!ExecUpdatePrologue(context, resultRelInfo,
3389 : tupleid, NULL, newslot, &result))
3390 : {
3391 11 : if (result == TM_Ok)
3392 102 : goto out; /* "do nothing" */
3393 :
3394 7 : break; /* concurrent update/delete */
3395 : }
3396 :
3397 : /* INSTEAD OF ROW UPDATE Triggers */
3398 1402 : if (resultRelInfo->ri_TrigDesc &&
3399 230 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
3400 : {
3401 52 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
3402 : oldtuple, newslot))
3403 0 : goto out; /* "do nothing" */
3404 : }
3405 : else
3406 : {
3407 : /* checked ri_needLockTagTuple above */
3408 : Assert(oldtuple == NULL);
3409 :
3410 1350 : result = ExecUpdateAct(context, resultRelInfo, tupleid,
3411 : NULL, newslot, canSetTag,
3412 : &updateCxt);
3413 :
3414 : /*
3415 : * As in ExecUpdate(), if ExecUpdateAct() reports that a
3416 : * cross-partition update was done, then there's nothing
3417 : * else for us to do --- the UPDATE has been turned into a
3418 : * DELETE and an INSERT, and we must not perform any of
3419 : * the usual post-update tasks. Also, the RETURNING tuple
3420 : * (if any) has been projected, so we can just return
3421 : * that.
3422 : */
3423 1335 : if (updateCxt.crossPartUpdate)
3424 : {
3425 89 : mtstate->mt_merge_updated += 1;
3426 89 : rslot = context->cpUpdateReturningSlot;
3427 89 : goto out;
3428 : }
3429 : }
3430 :
3431 1298 : if (result == TM_Ok)
3432 : {
3433 1251 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
3434 : tupleid, NULL, newslot);
3435 1243 : mtstate->mt_merge_updated += 1;
3436 : }
3437 1290 : break;
3438 :
3439 344 : case CMD_DELETE:
3440 344 : mtstate->mt_merge_action = relaction;
3441 344 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
3442 : NULL, NULL, &result))
3443 : {
3444 7 : if (result == TM_Ok)
3445 4 : goto out; /* "do nothing" */
3446 :
3447 3 : break; /* concurrent update/delete */
3448 : }
3449 :
3450 : /* INSTEAD OF ROW DELETE Triggers */
3451 337 : if (resultRelInfo->ri_TrigDesc &&
3452 37 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
3453 : {
3454 4 : if (!ExecIRDeleteTriggers(estate, resultRelInfo,
3455 : oldtuple))
3456 0 : goto out; /* "do nothing" */
3457 : }
3458 : else
3459 : {
3460 : /* checked ri_needLockTagTuple above */
3461 : Assert(oldtuple == NULL);
3462 :
3463 333 : result = ExecDeleteAct(context, resultRelInfo, tupleid,
3464 : false);
3465 : }
3466 :
3467 337 : if (result == TM_Ok)
3468 : {
3469 326 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
3470 : false);
3471 326 : mtstate->mt_merge_deleted += 1;
3472 : }
3473 337 : break;
3474 :
3475 20 : case CMD_NOTHING:
3476 : /* Doing nothing is always OK */
3477 20 : result = TM_Ok;
3478 20 : break;
3479 :
3480 0 : default:
3481 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3482 : }
3483 :
3484 1657 : switch (result)
3485 : {
3486 1589 : case TM_Ok:
3487 : /* all good; perform final actions */
3488 1589 : if (canSetTag && commandType != CMD_NOTHING)
3489 1555 : (estate->es_processed)++;
3490 :
3491 1589 : break;
3492 :
3493 21 : case TM_SelfModified:
3494 :
3495 : /*
3496 : * The target tuple was already updated or deleted by the
3497 : * current command, or by a later command in the current
3498 : * transaction. The former case is explicitly disallowed by
3499 : * the SQL standard for MERGE, which insists that the MERGE
3500 : * join condition should not join a target row to more than
3501 : * one source row.
3502 : *
3503 : * The latter case arises if the tuple is modified by a
3504 : * command in a BEFORE trigger, or perhaps by a command in a
3505 : * volatile function used in the query. In such situations we
3506 : * should not ignore the MERGE action, but it is equally
3507 : * unsafe to proceed. We don't want to discard the original
3508 : * MERGE action while keeping the triggered actions based on
3509 : * it; and it would be no better to allow the original MERGE
3510 : * action while discarding the updates that it triggered. So
3511 : * throwing an error is the only safe course.
3512 : */
3513 21 : if (context->tmfd.cmax != estate->es_output_cid)
3514 8 : ereport(ERROR,
3515 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3516 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3517 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3518 :
3519 13 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3520 13 : ereport(ERROR,
3521 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3522 : /* translator: %s is a SQL command name */
3523 : errmsg("%s command cannot affect row a second time",
3524 : "MERGE"),
3525 : errhint("Ensure that not more than one source row matches any one target row.")));
3526 :
3527 : /* This shouldn't happen */
3528 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3529 : break;
3530 :
3531 5 : case TM_Deleted:
3532 5 : if (IsolationUsesXactSnapshot())
3533 0 : ereport(ERROR,
3534 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3535 : errmsg("could not serialize access due to concurrent delete")));
3536 :
3537 : /*
3538 : * If the tuple was already deleted, set matched to false to
3539 : * let caller handle it under NOT MATCHED [BY TARGET] clauses.
3540 : */
3541 5 : *matched = false;
3542 5 : goto out;
3543 :
3544 42 : case TM_Updated:
3545 : {
3546 : bool was_matched;
3547 : Relation resultRelationDesc;
3548 : TupleTableSlot *epqslot,
3549 : *inputslot;
3550 : LockTupleMode lockmode;
3551 :
3552 42 : if (IsolationUsesXactSnapshot())
3553 1 : ereport(ERROR,
3554 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3555 : errmsg("could not serialize access due to concurrent update")));
3556 :
3557 : /*
3558 : * The target tuple was concurrently updated by some other
3559 : * transaction. If we are currently processing a MATCHED
3560 : * action, use EvalPlanQual() with the new version of the
3561 : * tuple and recheck the join qual, to detect a change
3562 : * from the MATCHED to the NOT MATCHED cases. If we are
3563 : * already processing a NOT MATCHED BY SOURCE action, we
3564 : * skip this (cannot switch from NOT MATCHED BY SOURCE to
3565 : * MATCHED).
3566 : */
3567 41 : was_matched = relaction->mas_action->matchKind == MERGE_WHEN_MATCHED;
3568 41 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
3569 41 : lockmode = ExecUpdateLockMode(estate, resultRelInfo);
3570 :
3571 41 : if (was_matched)
3572 41 : inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
3573 : resultRelInfo->ri_RangeTableIndex);
3574 : else
3575 0 : inputslot = resultRelInfo->ri_oldTupleSlot;
3576 :
3577 41 : result = table_tuple_lock(resultRelationDesc, tupleid,
3578 : estate->es_snapshot,
3579 : inputslot, estate->es_output_cid,
3580 : lockmode, LockWaitBlock,
3581 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
3582 : &context->tmfd);
3583 41 : switch (result)
3584 : {
3585 40 : case TM_Ok:
3586 :
3587 : /*
3588 : * If the tuple was updated and migrated to
3589 : * another partition concurrently, the current
3590 : * MERGE implementation can't follow. There's
3591 : * probably a better way to handle this case, but
3592 : * it'd require recognizing the relation to which
3593 : * the tuple moved, and setting our current
3594 : * resultRelInfo to that.
3595 : */
3596 40 : if (ItemPointerIndicatesMovedPartitions(tupleid))
3597 0 : ereport(ERROR,
3598 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3599 : errmsg("tuple to be merged was already moved to another partition due to concurrent update")));
3600 :
3601 : /*
3602 : * If this was a MATCHED case, use EvalPlanQual()
3603 : * to recheck the join condition.
3604 : */
3605 40 : if (was_matched)
3606 : {
3607 40 : epqslot = EvalPlanQual(epqstate,
3608 : resultRelationDesc,
3609 : resultRelInfo->ri_RangeTableIndex,
3610 : inputslot);
3611 :
3612 : /*
3613 : * If the subplan didn't return a tuple, then
3614 : * we must be dealing with an inner join for
3615 : * which the join condition no longer matches.
3616 : * This can only happen if there are no NOT
3617 : * MATCHED actions, and so there is nothing
3618 : * more to do.
3619 : */
3620 40 : if (TupIsNull(epqslot))
3621 0 : goto out;
3622 :
3623 : /*
3624 : * If we got a NULL ctid from the subplan, the
3625 : * join quals no longer pass and we switch to
3626 : * the NOT MATCHED BY SOURCE case.
3627 : */
3628 40 : (void) ExecGetJunkAttribute(epqslot,
3629 40 : resultRelInfo->ri_RowIdAttNo,
3630 : &isNull);
3631 40 : if (isNull)
3632 2 : *matched = false;
3633 :
3634 : /*
3635 : * Otherwise, recheck the join quals to see if
3636 : * we need to switch to the NOT MATCHED BY
3637 : * SOURCE case.
3638 : */
3639 40 : if (resultRelInfo->ri_needLockTagTuple)
3640 : {
3641 1 : if (ItemPointerIsValid(&lockedtid))
3642 1 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3643 : InplaceUpdateTupleLock);
3644 1 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3645 : InplaceUpdateTupleLock);
3646 1 : lockedtid = *tupleid;
3647 : }
3648 :
3649 40 : if (!table_tuple_fetch_row_version(resultRelationDesc,
3650 : tupleid,
3651 : SnapshotAny,
3652 : resultRelInfo->ri_oldTupleSlot))
3653 0 : elog(ERROR, "failed to fetch the target tuple");
3654 :
3655 40 : if (*matched)
3656 38 : *matched = ExecQual(resultRelInfo->ri_MergeJoinCondition,
3657 : econtext);
3658 :
3659 : /* Switch lists, if necessary */
3660 40 : if (!*matched)
3661 : {
3662 4 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3663 :
3664 : /*
3665 : * If we have both NOT MATCHED BY SOURCE
3666 : * and NOT MATCHED BY TARGET actions (a
3667 : * full join between the source and target
3668 : * relations), the single previously
3669 : * matched tuple from the outer plan node
3670 : * is treated as two not matched tuples,
3671 : * in the same way as if they had not
3672 : * matched to start with. Therefore, we
3673 : * must adjust the outer plan node's tuple
3674 : * count, if we're instrumenting the
3675 : * query, to get the correct "skipped" row
3676 : * count --- see show_modifytable_info().
3677 : */
3678 4 : if (outerPlanState(mtstate)->instrument &&
3679 1 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] &&
3680 1 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET])
3681 1 : InstrUpdateTupleCount(outerPlanState(mtstate)->instrument, 1.0);
3682 : }
3683 : }
3684 :
3685 : /*
3686 : * Loop back and process the MATCHED or NOT
3687 : * MATCHED BY SOURCE actions from the start.
3688 : */
3689 40 : goto lmerge_matched;
3690 :
3691 0 : case TM_Deleted:
3692 :
3693 : /*
3694 : * tuple already deleted; tell caller to run NOT
3695 : * MATCHED [BY TARGET] actions
3696 : */
3697 0 : *matched = false;
3698 0 : goto out;
3699 :
3700 1 : case TM_SelfModified:
3701 :
3702 : /*
3703 : * This can be reached when following an update
3704 : * chain from a tuple updated by another session,
3705 : * reaching a tuple that was already updated or
3706 : * deleted by the current command, or by a later
3707 : * command in the current transaction. As above,
3708 : * this should always be treated as an error.
3709 : */
3710 1 : if (context->tmfd.cmax != estate->es_output_cid)
3711 0 : ereport(ERROR,
3712 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3713 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3714 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3715 :
3716 1 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3717 1 : ereport(ERROR,
3718 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3719 : /* translator: %s is a SQL command name */
3720 : errmsg("%s command cannot affect row a second time",
3721 : "MERGE"),
3722 : errhint("Ensure that not more than one source row matches any one target row.")));
3723 :
3724 : /* This shouldn't happen */
3725 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3726 : goto out;
3727 :
3728 0 : default:
3729 : /* see table_tuple_lock call in ExecDelete() */
3730 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
3731 : result);
3732 : goto out;
3733 : }
3734 : }
3735 :
3736 0 : case TM_Invisible:
3737 : case TM_WouldBlock:
3738 : case TM_BeingModified:
3739 : /* these should not occur */
3740 0 : elog(ERROR, "unexpected tuple operation result: %d", result);
3741 : break;
3742 : }
3743 :
3744 : /* Process RETURNING if present */
3745 1589 : if (resultRelInfo->ri_projectReturning)
3746 : {
3747 284 : switch (commandType)
3748 : {
3749 124 : case CMD_UPDATE:
3750 124 : rslot = ExecProcessReturning(context,
3751 : resultRelInfo,
3752 : false,
3753 : resultRelInfo->ri_oldTupleSlot,
3754 : newslot,
3755 : context->planSlot);
3756 124 : break;
3757 :
3758 160 : case CMD_DELETE:
3759 160 : rslot = ExecProcessReturning(context,
3760 : resultRelInfo,
3761 : true,
3762 : resultRelInfo->ri_oldTupleSlot,
3763 : NULL,
3764 : context->planSlot);
3765 160 : break;
3766 :
3767 0 : case CMD_NOTHING:
3768 0 : break;
3769 :
3770 0 : default:
3771 0 : elog(ERROR, "unrecognized commandType: %d",
3772 : (int) commandType);
3773 : }
3774 : }
3775 :
3776 : /*
3777 : * We've activated one of the WHEN clauses, so we don't search
3778 : * further. This is required behaviour, not an optimization.
3779 : */
3780 1589 : break;
3781 : }
3782 :
3783 : /*
3784 : * Successfully executed an action or no qualifying action was found.
3785 : */
3786 8121 : out:
3787 8121 : if (ItemPointerIsValid(&lockedtid))
3788 5506 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3789 : InplaceUpdateTupleLock);
3790 8121 : return rslot;
3791 : }
3792 :
3793 : /*
3794 : * Execute the first qualifying NOT MATCHED [BY TARGET] action.
3795 : */
3796 : static TupleTableSlot *
3797 1796 : ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3798 : bool canSetTag)
3799 : {
 : /*
 : * NOTE(review): rslot starts NULL and is assigned only from
 : * ExecInsert(), so the caller receives the RETURNING slot of the
 : * fired INSERT action, or NULL when no WHEN NOT MATCHED clause fired
 : * (or the action produced no RETURNING tuple).
 : */
3800 1796 : ModifyTableState *mtstate = context->mtstate;
3801 1796 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3802 : List *actionStates;
3803 1796 : TupleTableSlot *rslot = NULL;
3804 : ListCell *l;
3805 :
3806 : /*
3807 : * For INSERT actions, the root relation's merge action is OK since the
3808 : * INSERT's targetlist and the WHEN conditions can only refer to the
3809 : * source relation and hence it does not matter which result relation we
3810 : * work with.
3811 : *
3812 : * XXX does this mean that we can avoid creating copies of actionStates on
3813 : * partitioned tables, for not-matched actions?
3814 : */
3815 1796 : actionStates = resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET];
3816 :
3817 : /*
3818 : * Make source tuple available to ExecQual and ExecProject. We don't need
3819 : * the target tuple, since the WHEN quals and targetlist can't refer to
3820 : * the target columns.
3821 : */
3822 1796 : econtext->ecxt_scantuple = NULL;
3823 1796 : econtext->ecxt_innertuple = context->planSlot;
3824 1796 : econtext->ecxt_outertuple = NULL;
3825 :
3826 2376 : foreach(l, actionStates)
3827 : {
3828 1796 : MergeActionState *action = (MergeActionState *) lfirst(l);
3829 1796 : CmdType commandType = action->mas_action->commandType;
3830 : TupleTableSlot *newslot;
3831 :
3832 : /*
3833 : * Test condition, if any.
3834 : *
3835 : * In the absence of any condition, we perform the action
3836 : * unconditionally (no need to check separately since ExecQual() will
3837 : * return true if there are no conditions to evaluate).
3838 : */
3839 1796 : if (!ExecQual(action->mas_whenqual, econtext))
3840 580 : continue;
3841 :
3842 : /* Perform stated action */
3843 1216 : switch (commandType)
3844 : {
3845 1216 : case CMD_INSERT:
3846 :
3847 : /*
3848 : * Project the tuple. In case of a partitioned table, the
3849 : * projection was already built to use the root's descriptor,
3850 : * so we don't need to map the tuple here.
3851 : */
3852 1216 : newslot = ExecProject(action->mas_proj);
3853 1216 : mtstate->mt_merge_action = action;
3854 :
 : /* INSERTs always go through the root result relation. */
3855 1216 : rslot = ExecInsert(context, mtstate->rootResultRelInfo,
3856 : newslot, canSetTag, NULL, NULL);
3857 1177 : mtstate->mt_merge_inserted += 1;
3858 1177 : break;
3859 0 : case CMD_NOTHING:
3860 : /* Do nothing */
3861 0 : break;
3862 0 : default:
3863 0 : elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
3864 : }
3865 :
3866 : /*
3867 : * We've activated one of the WHEN clauses, so we don't search
3868 : * further. This is required behaviour, not an optimization.
3869 : */
3870 1177 : break;
3871 : }
3872 :
3873 1757 : return rslot;
3874 : }
3875 :
3876 : /*
3877 : * Initialize state for execution of MERGE.
3878 : */
3879 : void
3880 1051 : ExecInitMerge(ModifyTableState *mtstate, EState *estate)
3881 : {
3882 1051 : List *mergeActionLists = mtstate->mt_mergeActionLists;
3883 1051 : List *mergeJoinConditions = mtstate->mt_mergeJoinConditions;
3884 1051 : ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
3885 : ResultRelInfo *resultRelInfo;
3886 : ExprContext *econtext;
3887 : ListCell *lc;
3888 : int i;
3889 :
3890 1051 : if (mergeActionLists == NIL)
3891 0 : return;
3892 :
3893 1051 : mtstate->mt_merge_subcommands = 0;
3894 :
3895 1051 : if (mtstate->ps.ps_ExprContext == NULL)
3896 863 : ExecAssignExprContext(estate, &mtstate->ps);
3897 1051 : econtext = mtstate->ps.ps_ExprContext;
3898 :
3899 : /*
3900 : * Create a MergeActionState for each action on the mergeActionList and
3901 : * add it to either a list of matched actions or not-matched actions.
3902 : *
3903 : * Similar logic appears in ExecInitPartitionInfo(), so if changing
3904 : * anything here, do so there too.
3905 : */
 : /*
 : * NOTE(review): "i" indexes mergeActionLists, mergeJoinConditions and
 : * the resultRelInfo array in lock-step; these parallel structures must
 : * stay the same length.
 : */
3906 1051 : i = 0;
3907 2258 : foreach(lc, mergeActionLists)
3908 : {
3909 1207 : List *mergeActionList = lfirst(lc);
3910 : Node *joinCondition;
3911 : TupleDesc relationDesc;
3912 : ListCell *l;
3913 :
3914 1207 : joinCondition = (Node *) list_nth(mergeJoinConditions, i);
3915 1207 : resultRelInfo = mtstate->resultRelInfo + i;
3916 1207 : i++;
3917 1207 : relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3918 :
3919 : /* initialize slots for MERGE fetches from this rel */
3920 1207 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3921 1207 : ExecInitMergeTupleSlots(mtstate, resultRelInfo);
3922 :
3923 : /* initialize state for join condition checking */
3924 1207 : resultRelInfo->ri_MergeJoinCondition =
3925 1207 : ExecInitQual((List *) joinCondition, &mtstate->ps);
3926 :
3927 3307 : foreach(l, mergeActionList)
3928 : {
3929 2100 : MergeAction *action = (MergeAction *) lfirst(l);
3930 : MergeActionState *action_state;
3931 : TupleTableSlot *tgtslot;
3932 : TupleDesc tgtdesc;
3933 :
3934 : /*
3935 : * Build action merge state for this rel. (For partitions,
3936 : * equivalent code exists in ExecInitPartitionInfo.)
3937 : */
3938 2100 : action_state = makeNode(MergeActionState);
3939 2100 : action_state->mas_action = action;
3940 2100 : action_state->mas_whenqual = ExecInitQual((List *) action->qual,
3941 : &mtstate->ps);
3942 :
3943 : /*
3944 : * We create three lists - one for each MergeMatchKind - and stick
3945 : * the MergeActionState into the appropriate list.
3946 : */
3947 4200 : resultRelInfo->ri_MergeActions[action->matchKind] =
3948 2100 : lappend(resultRelInfo->ri_MergeActions[action->matchKind],
3949 : action_state);
3950 :
3951 2100 : switch (action->commandType)
3952 : {
3953 700 : case CMD_INSERT:
3954 : /* INSERT actions always use rootRelInfo */
3955 700 : ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
3956 : action->targetList);
3957 :
3958 : /*
3959 : * If the MERGE targets a partitioned table, any INSERT
3960 : * actions must be routed through it, not the child
3961 : * relations. Initialize the routing struct and the root
3962 : * table's "new" tuple slot for that, if not already done.
3963 : * The projection we prepare, for all relations, uses the
3964 : * root relation descriptor, and targets the plan's root
3965 : * slot. (This is consistent with the fact that we
3966 : * checked the plan output to match the root relation,
3967 : * above.)
3968 : */
3969 700 : if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
3970 : RELKIND_PARTITIONED_TABLE)
3971 : {
3972 216 : if (mtstate->mt_partition_tuple_routing == NULL)
3973 : {
3974 : /*
3975 : * Initialize planstate for routing if not already
3976 : * done.
3977 : *
3978 : * Note that the slot is managed as a standalone
3979 : * slot belonging to ModifyTableState, so we pass
3980 : * NULL for the 2nd argument.
3981 : */
3982 100 : mtstate->mt_root_tuple_slot =
3983 100 : table_slot_create(rootRelInfo->ri_RelationDesc,
3984 : NULL);
3985 100 : mtstate->mt_partition_tuple_routing =
3986 100 : ExecSetupPartitionTupleRouting(estate,
3987 : rootRelInfo->ri_RelationDesc);
3988 : }
3989 216 : tgtslot = mtstate->mt_root_tuple_slot;
3990 216 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
3991 : }
3992 : else
3993 : {
3994 : /*
3995 : * If the MERGE targets an inherited table, we insert
3996 : * into the root table, so we must initialize its
3997 : * "new" tuple slot, if not already done, and use its
3998 : * relation descriptor for the projection.
3999 : *
4000 : * For non-inherited tables, rootRelInfo and
4001 : * resultRelInfo are the same, and the "new" tuple
4002 : * slot will already have been initialized.
4003 : */
4004 484 : if (rootRelInfo->ri_newTupleSlot == NULL)
4005 24 : rootRelInfo->ri_newTupleSlot =
4006 24 : table_slot_create(rootRelInfo->ri_RelationDesc,
4007 : &estate->es_tupleTable);
4008 :
4009 484 : tgtslot = rootRelInfo->ri_newTupleSlot;
4010 484 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
4011 : }
4012 :
4013 700 : action_state->mas_proj =
4014 700 : ExecBuildProjectionInfo(action->targetList, econtext,
4015 : tgtslot,
4016 : &mtstate->ps,
4017 : tgtdesc);
4018 :
4019 700 : mtstate->mt_merge_subcommands |= MERGE_INSERT;
4020 700 : break;
4021 1040 : case CMD_UPDATE:
4022 1040 : action_state->mas_proj =
4023 1040 : ExecBuildUpdateProjection(action->targetList,
4024 : true,
4025 : action->updateColnos,
4026 : relationDesc,
4027 : econtext,
4028 : resultRelInfo->ri_newTupleSlot,
4029 : &mtstate->ps);
4030 1040 : mtstate->mt_merge_subcommands |= MERGE_UPDATE;
4031 1040 : break;
4032 310 : case CMD_DELETE:
 : /* DELETE needs no projection; just record the subcommand. */
4033 310 : mtstate->mt_merge_subcommands |= MERGE_DELETE;
4034 310 : break;
4035 50 : case CMD_NOTHING:
4036 50 : break;
4037 0 : default:
4038 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
4039 : break;
4040 : }
4041 : }
4042 : }
4043 :
4044 : /*
4045 : * If the MERGE targets an inherited table, any INSERT actions will use
4046 : * rootRelInfo, and rootRelInfo will not be in the resultRelInfo array.
4047 : * Therefore we must initialize its WITH CHECK OPTION constraints and
4048 : * RETURNING projection, as ExecInitModifyTable did for the resultRelInfo
4049 : * entries.
4050 : *
4051 : * Note that the planner does not build a withCheckOptionList or
4052 : * returningList for the root relation, but as in ExecInitPartitionInfo,
4053 : * we can use the first resultRelInfo entry as a reference to calculate
4054 : * the attno's for the root table.
4055 : */
4056 1051 : if (rootRelInfo != mtstate->resultRelInfo &&
4057 159 : rootRelInfo->ri_RelationDesc->rd_rel->relkind != RELKIND_PARTITIONED_TABLE &&
4058 32 : (mtstate->mt_merge_subcommands & MERGE_INSERT) != 0)
4059 : {
4060 24 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
4061 24 : Relation rootRelation = rootRelInfo->ri_RelationDesc;
4062 24 : Relation firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
4063 24 : int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
4064 24 : AttrMap *part_attmap = NULL;
4065 : bool found_whole_row;
4066 :
4067 24 : if (node->withCheckOptionLists != NIL)
4068 : {
4069 : List *wcoList;
4070 12 : List *wcoExprs = NIL;
4071 :
4072 : /* There should be as many WCO lists as result rels */
4073 : Assert(list_length(node->withCheckOptionLists) ==
4074 : list_length(node->resultRelations));
4075 :
4076 : /*
4077 : * Use the first WCO list as a reference. In the most common case,
4078 : * this will be for the same relation as rootRelInfo, and so there
4079 : * will be no need to adjust its attno's.
4080 : */
4081 12 : wcoList = linitial(node->withCheckOptionLists);
4082 12 : if (rootRelation != firstResultRel)
4083 : {
4084 : /* Convert any Vars in it to contain the root's attno's */
4085 : part_attmap =
4086 12 : build_attrmap_by_name(RelationGetDescr(rootRelation),
4087 : RelationGetDescr(firstResultRel),
4088 : false);
4089 :
4090 : wcoList = (List *)
4091 12 : map_variable_attnos((Node *) wcoList,
4092 : firstVarno, 0,
4093 : part_attmap,
4094 12 : RelationGetForm(rootRelation)->reltype,
4095 : &found_whole_row);
4096 : }
4097 :
4098 60 : foreach(lc, wcoList)
4099 : {
4100 48 : WithCheckOption *wco = lfirst_node(WithCheckOption, lc);
4101 48 : ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual),
4102 : &mtstate->ps);
4103 :
4104 48 : wcoExprs = lappend(wcoExprs, wcoExpr);
4105 : }
4106 :
4107 12 : rootRelInfo->ri_WithCheckOptions = wcoList;
4108 12 : rootRelInfo->ri_WithCheckOptionExprs = wcoExprs;
4109 : }
4110 :
4111 24 : if (node->returningLists != NIL)
4112 : {
4113 : List *returningList;
4114 :
4115 : /* There should be as many returning lists as result rels */
4116 : Assert(list_length(node->returningLists) ==
4117 : list_length(node->resultRelations));
4118 :
4119 : /*
4120 : * Use the first returning list as a reference. In the most common
4121 : * case, this will be for the same relation as rootRelInfo, and so
4122 : * there will be no need to adjust its attno's.
4123 : */
4124 4 : returningList = linitial(node->returningLists);
4125 4 : if (rootRelation != firstResultRel)
4126 : {
4127 : /* Convert any Vars in it to contain the root's attno's */
4128 4 : if (part_attmap == NULL)
4129 : part_attmap =
4130 0 : build_attrmap_by_name(RelationGetDescr(rootRelation),
4131 : RelationGetDescr(firstResultRel),
4132 : false);
4133 :
4134 : returningList = (List *)
4135 4 : map_variable_attnos((Node *) returningList,
4136 : firstVarno, 0,
4137 : part_attmap,
4138 4 : RelationGetForm(rootRelation)->reltype,
4139 : &found_whole_row);
4140 : }
4141 4 : rootRelInfo->ri_returningList = returningList;
4142 :
4143 : /* Initialize the RETURNING projection */
4144 4 : rootRelInfo->ri_projectReturning =
4145 4 : ExecBuildProjectionInfo(returningList, econtext,
4146 : mtstate->ps.ps_ResultTupleSlot,
4147 : &mtstate->ps,
4148 : RelationGetDescr(rootRelation));
4149 : }
4150 : }
4151 : }
4152 :
4153 : /*
4154 : * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
4155 : *
4156 : * We mark 'projectNewInfoValid' even though the projections themselves
4157 : * are not initialized here.
4158 : */
4159 : void
4160 1222 : ExecInitMergeTupleSlots(ModifyTableState *mtstate,
4161 : ResultRelInfo *resultRelInfo)
4162 : {
4163 1222 : EState *estate = mtstate->ps.state;
4164 :
4165 : Assert(!resultRelInfo->ri_projectNewInfoValid);
4166 :
 : /*
 : * Both slots are registered in estate->es_tupleTable, so the executor
 : * releases them at shutdown; no explicit cleanup is needed here.
 : */
4167 1222 : resultRelInfo->ri_oldTupleSlot =
4168 1222 : table_slot_create(resultRelInfo->ri_RelationDesc,
4169 : &estate->es_tupleTable);
4170 1222 : resultRelInfo->ri_newTupleSlot =
4171 1222 : table_slot_create(resultRelInfo->ri_RelationDesc,
4172 : &estate->es_tupleTable);
4173 1222 : resultRelInfo->ri_projectNewInfoValid = true;
4174 1222 : }
4175 :
4176 : /*
4177 : * Process BEFORE EACH STATEMENT triggers
4178 : */
4179 : static void
4180 72772 : fireBSTriggers(ModifyTableState *node)
4181 : {
4182 72772 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
4183 72772 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
4184 :
4185 72772 : switch (node->operation)
4186 : {
4187 55483 : case CMD_INSERT:
4188 55483 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
 : /* INSERT ... ON CONFLICT DO UPDATE also fires BS UPDATE triggers */
4189 55475 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
4190 596 : ExecBSUpdateTriggers(node->ps.state,
4191 : resultRelInfo);
4192 55475 : break;
4193 8403 : case CMD_UPDATE:
4194 8403 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
4195 8403 : break;
4196 7934 : case CMD_DELETE:
4197 7934 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
4198 7934 : break;
4199 952 : case CMD_MERGE:
 : /*
 : * For MERGE, fire only the BEFORE STATEMENT triggers matching the
 : * action types actually present in mt_merge_subcommands.
 : */
4200 952 : if (node->mt_merge_subcommands & MERGE_INSERT)
4201 519 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
4202 952 : if (node->mt_merge_subcommands & MERGE_UPDATE)
4203 629 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
4204 952 : if (node->mt_merge_subcommands & MERGE_DELETE)
4205 254 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
4206 952 : break;
4207 0 : default:
4208 0 : elog(ERROR, "unknown operation");
4209 : break;
4210 : }
4211 72764 : }
4212 :
4213 : /*
4214 : * Process AFTER EACH STATEMENT triggers
4215 : */
4216 : static void
4217 70622 : fireASTriggers(ModifyTableState *node)
4218 : {
4219 70622 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
4220 70622 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
4221 :
4222 70622 : switch (node->operation)
4223 : {
4224 53954 : case CMD_INSERT:
 : /* ON CONFLICT DO UPDATE: AS UPDATE triggers fire before AS INSERT */
4225 53954 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
4226 524 : ExecASUpdateTriggers(node->ps.state,
4227 : resultRelInfo,
4228 524 : node->mt_oc_transition_capture);
4229 53954 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
4230 53954 : node->mt_transition_capture);
4231 53954 : break;
4232 7948 : case CMD_UPDATE:
4233 7948 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
4234 7948 : node->mt_transition_capture);
4235 7948 : break;
4236 7869 : case CMD_DELETE:
4237 7869 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
4238 7869 : node->mt_transition_capture);
4239 7869 : break;
4240 851 : case CMD_MERGE:
 : /*
 : * For MERGE, AFTER STATEMENT triggers fire in DELETE, UPDATE,
 : * INSERT order -- the reverse of the BEFORE STATEMENT order used
 : * in fireBSTriggers().
 : */
4241 851 : if (node->mt_merge_subcommands & MERGE_DELETE)
4242 230 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
4243 230 : node->mt_transition_capture);
4244 851 : if (node->mt_merge_subcommands & MERGE_UPDATE)
4245 564 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
4246 564 : node->mt_transition_capture);
4247 851 : if (node->mt_merge_subcommands & MERGE_INSERT)
4248 474 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
4249 474 : node->mt_transition_capture);
4250 851 : break;
4251 0 : default:
4252 0 : elog(ERROR, "unknown operation");
4253 : break;
4254 : }
4255 70622 : }
4256 :
4257 : /*
4258 : * Set up the state needed for collecting transition tuples for AFTER
4259 : * triggers.
4260 : */
4261 : static void
4262 73005 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
4263 : {
4264 73005 : ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
4265 73005 : ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
4266 :
4267 : /* Check for transition tables on the directly targeted relation. */
4268 73005 : mtstate->mt_transition_capture =
4269 73005 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
4270 73005 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
4271 : mtstate->operation);
 : /*
 : * mt_oc_transition_capture is set up only for INSERT ... ON CONFLICT
 : * DO UPDATE, using CMD_UPDATE semantics for the capture state.
 : */
4272 73005 : if (plan->operation == CMD_INSERT &&
4273 55488 : plan->onConflictAction == ONCONFLICT_UPDATE)
4274 600 : mtstate->mt_oc_transition_capture =
4275 600 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
4276 600 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
4277 : CMD_UPDATE);
4278 73005 : }
4279 :
4280 : /*
4281 : * ExecPrepareTupleRouting --- prepare for routing one tuple
4282 : *
4283 : * Determine the partition in which the tuple in slot is to be inserted,
4284 : * and return its ResultRelInfo in *partRelInfo. The return value is
4285 : * a slot holding the tuple of the partition rowtype.
4286 : *
4287 : * This also sets the transition table information in mtstate based on the
4288 : * selected partition.
     : *
     : * Note: the returned slot is the caller's own 'slot' when no rowtype
     : * conversion is needed; otherwise it is the partition's dedicated
     : * ri_PartitionTupleSlot holding the converted tuple.
4289 : */
4290 : static TupleTableSlot *
4291 480774 : ExecPrepareTupleRouting(ModifyTableState *mtstate,
4292 : EState *estate,
4293 : PartitionTupleRouting *proute,
4294 : ResultRelInfo *targetRelInfo,
4295 : TupleTableSlot *slot,
4296 : ResultRelInfo **partRelInfo)
4297 : {
4298 : ResultRelInfo *partrel;
4299 : TupleConversionMap *map;
4300 :
4301 : /*
4302 : * Lookup the target partition's ResultRelInfo. If ExecFindPartition does
4303 : * not find a valid partition for the tuple in 'slot' then an error is
4304 : * raised. An error may also be raised if the found partition is not a
4305 : * valid target for INSERTs. This is required since a partitioned table
4306 : * UPDATE to another partition becomes a DELETE+INSERT.
4307 : */
4308 480774 : partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
4309 :
4310 : /*
4311 : * If we're capturing transition tuples, we might need to convert from the
4312 : * partition rowtype to root partitioned table's rowtype. But if there
4313 : * are no BEFORE triggers on the partition that could change the tuple, we
4314 : * can just remember the original unconverted tuple to avoid a needless
4315 : * round trip conversion.
4316 : */
4317 480630 : if (mtstate->mt_transition_capture != NULL)
4318 : {
4319 : bool has_before_insert_row_trig;
4320 :
4321 130 : has_before_insert_row_trig = (partrel->ri_TrigDesc &&
4322 28 : partrel->ri_TrigDesc->trig_insert_before_row);
4323 :
     : /* NULL tells the capture machinery it must convert back from the
     : * partition rowtype after any BEFORE ROW triggers have run */
4324 102 : mtstate->mt_transition_capture->tcs_original_insert_tuple =
4325 102 : !has_before_insert_row_trig ? slot : NULL;
4326 : }
4327 :
4328 : /*
4329 : * Convert the tuple, if necessary.
4330 : */
4331 480630 : map = ExecGetRootToChildMap(partrel, estate);
4332 480630 : if (map != NULL)
4333 : {
4334 45712 : TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
4335 :
4336 45712 : slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
4337 : }
4338 :
4339 480630 : *partRelInfo = partrel;
4340 480630 : return slot;
4341 : }
4342 :
4343 : /* ----------------------------------------------------------------
4344 : * ExecModifyTable
4345 : *
4346 : * Perform table modifications as required, and return RETURNING results
4347 : * if needed.
     : *
     : * Each call returns at most one RETURNING tuple; whenever a RETURNING
     : * projection is produced we return immediately, and the loop resumes
     : * from the top on the next call (pending MERGE NOT MATCHED work is
     : * stashed in mt_merge_pending_not_matched for that purpose).  Once the
     : * subplan is exhausted we flush any pending batched inserts, fire
     : * AFTER STATEMENT triggers, set mt_done, and return NULL.
4348 : * ----------------------------------------------------------------
4349 : */
4350 : static TupleTableSlot *
4351 78829 : ExecModifyTable(PlanState *pstate)
4352 : {
4353 78829 : ModifyTableState *node = castNode(ModifyTableState, pstate);
4354 : ModifyTableContext context;
4355 78829 : EState *estate = node->ps.state;
4356 78829 : CmdType operation = node->operation;
4357 : ResultRelInfo *resultRelInfo;
4358 : PlanState *subplanstate;
4359 : TupleTableSlot *slot;
4360 : TupleTableSlot *oldSlot;
4361 : ItemPointerData tuple_ctid;
4362 : HeapTupleData oldtupdata;
4363 : HeapTuple oldtuple;
4364 : ItemPointer tupleid;
4365 : bool tuplock;
4366 :
4367 78829 : CHECK_FOR_INTERRUPTS();
4368 :
4369 : /*
4370 : * This should NOT get called during EvalPlanQual; we should have passed a
4371 : * subplan tree to EvalPlanQual, instead. Use a runtime test not just
4372 : * Assert because this condition is easy to miss in testing. (Note:
4373 : * although ModifyTable should not get executed within an EvalPlanQual
4374 : * operation, we do have to allow it to be initialized and shut down in
4375 : * case it is within a CTE subplan. Hence this test must be here, not in
4376 : * ExecInitModifyTable.)
4377 : */
4378 78829 : if (estate->es_epq_active != NULL)
4379 0 : elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
4380 :
4381 : /*
4382 : * If we've already completed processing, don't try to do more. We need
4383 : * this test because ExecPostprocessPlan might call us an extra time, and
4384 : * our subplan's nodes aren't necessarily robust against being called
4385 : * extra times.
4386 : */
4387 78829 : if (node->mt_done)
4388 528 : return NULL;
4389 :
4390 : /*
4391 : * On first call, fire BEFORE STATEMENT triggers before proceeding.
4392 : */
4393 78301 : if (node->fireBSTriggers)
4394 : {
4395 72772 : fireBSTriggers(node);
4396 72764 : node->fireBSTriggers = false;
4397 : }
4398 :
     : /* Preload local variables (cached last-used result rel from the
     : * previous call, if any) */
4399 : /* Preload local variables */
4400 78293 : resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
4401 78293 : subplanstate = outerPlanState(node);
4402 :
4403 : /* Set global context */
4404 78293 : context.mtstate = node;
4405 78293 : context.epqstate = &node->mt_epqstate;
4406 78293 : context.estate = estate;
4407 :
4408 : /*
4409 : * Fetch rows from subplan, and execute the required table modification
4410 : * for each row.
4411 : */
4412 : */
4413 : for (;;)
4414 : {
4415 : /*
4416 : * Reset the per-output-tuple exprcontext. This is needed because
4417 : * triggers expect to use that context as workspace. It's a bit ugly
4418 : * to do this below the top level of the plan, however. We might need
4419 : * to rethink this later.
4420 : */
4421 8702857 : ResetPerTupleExprContext(estate);
4422 :
4423 : /*
4424 : * Reset per-tuple memory context used for processing on conflict and
4425 : * returning clauses, to free any expression evaluation storage
4426 : * allocated in the previous cycle.
4427 : */
4428 8702857 : if (pstate->ps_ExprContext)
4429 192558 : ResetExprContext(pstate->ps_ExprContext);
4430 :
4431 : /*
4432 : * If there is a pending MERGE ... WHEN NOT MATCHED [BY TARGET] action
4433 : * to execute, do so now --- see the comments in ExecMerge().
4434 : */
4435 8702857 : if (node->mt_merge_pending_not_matched != NULL)
4436 : {
4437 2 : context.planSlot = node->mt_merge_pending_not_matched;
4438 2 : context.cpDeletedSlot = NULL;
4439 :
4440 2 : slot = ExecMergeNotMatched(&context, node->resultRelInfo,
4441 2 : node->canSetTag);
4442 :
4443 : /* Clear the pending action */
4444 2 : node->mt_merge_pending_not_matched = NULL;
4445 :
4446 : /*
4447 : * If we got a RETURNING result, return it to the caller. We'll
4448 : * continue the work on next call.
4449 : */
4450 2 : if (slot)
4451 2 : return slot;
4452 :
4453 0 : continue; /* continue with the next tuple */
4454 : }
4455 :
4456 : /* Fetch the next row from subplan */
4457 8702855 : context.planSlot = ExecProcNode(subplanstate);
4458 8702580 : context.cpDeletedSlot = NULL;
4459 :
4460 : /* No more tuples to process? */
4461 8702580 : if (TupIsNull(context.planSlot))
4462 : break;
4463 :
4464 : /*
4465 : * When there are multiple result relations, each tuple contains a
4466 : * junk column that gives the OID of the rel from which it came.
4467 : * Extract it and select the correct result relation.
4468 : */
4469 8631957 : if (AttributeNumberIsValid(node->mt_resultOidAttno))
4470 : {
4471 : Datum datum;
4472 : bool isNull;
4473 : Oid resultoid;
4474 :
4475 3353 : datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
4476 : &isNull);
4477 3353 : if (isNull)
4478 : {
4479 : /*
4480 : * For commands other than MERGE, any tuples having InvalidOid
4481 : * for tableoid are errors. For MERGE, we may need to handle
4482 : * them as WHEN NOT MATCHED clauses if any, so do that.
4483 : *
4484 : * Note that we use the node's toplevel resultRelInfo, not any
4485 : * specific partition's.
4486 : */
4487 338 : if (operation == CMD_MERGE)
4488 : {
4489 338 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4490 :
4491 338 : slot = ExecMerge(&context, node->resultRelInfo,
4492 338 : NULL, NULL, node->canSetTag);
4493 :
4494 : /*
4495 : * If we got a RETURNING result, return it to the caller.
4496 : * We'll continue the work on next call.
4497 : */
4498 330 : if (slot)
4499 25 : return slot;
4500 :
4501 305 : continue; /* continue with the next tuple */
4502 : }
4503 :
4504 0 : elog(ERROR, "tableoid is NULL");
4505 : }
4506 3015 : resultoid = DatumGetObjectId(datum);
4507 :
4508 : /* If it's not the same as last time, we need to locate the rel */
4509 3015 : if (resultoid != node->mt_lastResultOid)
4510 2071 : resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
4511 : false, true);
4512 : }
4513 :
4514 : /*
4515 : * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
4516 : * here is compute the RETURNING expressions.
4517 : */
4518 8631619 : if (resultRelInfo->ri_usesFdwDirectModify)
4519 : {
4520 : Assert(resultRelInfo->ri_projectReturning);
4521 :
4522 : /*
4523 : * A scan slot containing the data that was actually inserted,
4524 : * updated or deleted has already been made available to
4525 : * ExecProcessReturning by IterateDirectModify, so no need to
4526 : * provide it here. The individual old and new slots are not
4527 : * needed, since direct-modify is disabled if the RETURNING list
4528 : * refers to OLD/NEW values.
4529 : */
4530 : Assert((resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD) == 0 &&
4531 : (resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW) == 0);
4532 :
4533 347 : slot = ExecProcessReturning(&context, resultRelInfo,
4534 : operation == CMD_DELETE,
4535 : NULL, NULL, context.planSlot);
4536 :
4537 347 : return slot;
4538 : }
4539 :
4540 8631272 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4541 8631272 : slot = context.planSlot;
4542 :
4543 8631272 : tupleid = NULL;
4544 8631272 : oldtuple = NULL;
4545 :
4546 : /*
4547 : * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
4548 : * to be updated/deleted/merged. For a heap relation, that's a TID;
4549 : * otherwise we may have a wholerow junk attr that carries the old
4550 : * tuple in toto. Keep this in step with the part of
4551 : * ExecInitModifyTable that sets up ri_RowIdAttNo.
4552 : */
4553 8631272 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4554 : operation == CMD_MERGE)
4555 : {
4556 : char relkind;
4557 : Datum datum;
4558 : bool isNull;
4559 :
4560 1190441 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4561 1190441 : if (relkind == RELKIND_RELATION ||
4562 338 : relkind == RELKIND_MATVIEW ||
4563 : relkind == RELKIND_PARTITIONED_TABLE)
4564 : {
4565 : /*
4566 : * ri_RowIdAttNo refers to a ctid attribute. See the comment
4567 : * in ExecInitModifyTable().
4568 : */
4569 : Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo) ||
4570 : relkind == RELKIND_PARTITIONED_TABLE);
4571 1190107 : datum = ExecGetJunkAttribute(slot,
4572 1190107 : resultRelInfo->ri_RowIdAttNo,
4573 : &isNull);
4574 :
4575 : /*
4576 : * For commands other than MERGE, any tuples having a null row
4577 : * identifier are errors. For MERGE, we may need to handle
4578 : * them as WHEN NOT MATCHED clauses if any, so do that.
4579 : *
4580 : * Note that we use the node's toplevel resultRelInfo, not any
4581 : * specific partition's.
4582 : */
4583 1190107 : if (isNull)
4584 : {
4585 1417 : if (operation == CMD_MERGE)
4586 : {
4587 1417 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4588 :
4589 1417 : slot = ExecMerge(&context, node->resultRelInfo,
4590 1417 : NULL, NULL, node->canSetTag);
4591 :
4592 : /*
4593 : * If we got a RETURNING result, return it to the
4594 : * caller. We'll continue the work on next call.
4595 : */
4596 1390 : if (slot)
4597 84 : return slot;
4598 :
4599 1334 : continue; /* continue with the next tuple */
4600 : }
4601 :
4602 0 : elog(ERROR, "ctid is NULL");
4603 : }
4604 :
     : /* Copy the TID into a local: the datum points into the slot,
     : * whose storage may be released before we are done with it */
4605 1188690 : tupleid = (ItemPointer) DatumGetPointer(datum);
4606 1188690 : tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
4607 1188690 : tupleid = &tuple_ctid;
4608 : }
4609 :
4610 : /*
4611 : * Use the wholerow attribute, when available, to reconstruct the
4612 : * old relation tuple. The old tuple serves one or both of two
4613 : * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
4614 : * provides values for any unchanged columns for the NEW tuple of
4615 : * an UPDATE, because the subplan does not produce all the columns
4616 : * of the target table.
4617 : *
4618 : * Note that the wholerow attribute does not carry system columns,
4619 : * so foreign table triggers miss seeing those, except that we
4620 : * know enough here to set t_tableOid. Quite separately from
4621 : * this, the FDW may fetch its own junk attrs to identify the row.
4622 : *
4623 : * Other relevant relkinds, currently limited to views, always
4624 : * have a wholerow attribute.
4625 : */
4626 334 : else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4627 : {
4628 319 : datum = ExecGetJunkAttribute(slot,
4629 319 : resultRelInfo->ri_RowIdAttNo,
4630 : &isNull);
4631 :
4632 : /*
4633 : * For commands other than MERGE, any tuples having a null row
4634 : * identifier are errors. For MERGE, we may need to handle
4635 : * them as WHEN NOT MATCHED clauses if any, so do that.
4636 : *
4637 : * Note that we use the node's toplevel resultRelInfo, not any
4638 : * specific partition's.
4639 : */
4640 319 : if (isNull)
4641 : {
4642 32 : if (operation == CMD_MERGE)
4643 : {
4644 32 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4645 :
4646 32 : slot = ExecMerge(&context, node->resultRelInfo,
4647 32 : NULL, NULL, node->canSetTag);
4648 :
4649 : /*
4650 : * If we got a RETURNING result, return it to the
4651 : * caller. We'll continue the work on next call.
4652 : */
4653 28 : if (slot)
4654 8 : return slot;
4655 :
4656 20 : continue; /* continue with the next tuple */
4657 : }
4658 :
4659 0 : elog(ERROR, "wholerow is NULL");
4660 : }
4661 :
4662 287 : oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
4663 287 : oldtupdata.t_len =
4664 287 : HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
4665 287 : ItemPointerSetInvalid(&(oldtupdata.t_self));
4666 : /* Historically, view triggers see invalid t_tableOid. */
4667 287 : oldtupdata.t_tableOid =
4668 287 : (relkind == RELKIND_VIEW) ? InvalidOid :
4669 105 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
4670 :
4671 287 : oldtuple = &oldtupdata;
4672 : }
4673 : else
4674 : {
4675 : /* Only foreign tables are allowed to omit a row-ID attr */
4676 : Assert(relkind == RELKIND_FOREIGN_TABLE);
4677 : }
4678 : }
4679 :
4680 8629823 : switch (operation)
4681 : {
4682 7440831 : case CMD_INSERT:
4683 : /* Initialize projection info if first time for this table */
4684 7440831 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4685 54770 : ExecInitInsertProjection(node, resultRelInfo);
4686 7440831 : slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
4687 7440831 : slot = ExecInsert(&context, resultRelInfo, slot,
4688 7440831 : node->canSetTag, NULL, NULL);
4689 7439431 : break;
4690 :
4691 169820 : case CMD_UPDATE:
4692 169820 : tuplock = false;
4693 :
4694 : /* Initialize projection info if first time for this table */
4695 169820 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4696 8204 : ExecInitUpdateProjection(node, resultRelInfo);
4697 :
4698 : /*
4699 : * Make the new tuple by combining plan's output tuple with
4700 : * the old tuple being updated.
4701 : */
4702 169820 : oldSlot = resultRelInfo->ri_oldTupleSlot;
4703 169820 : if (oldtuple != NULL)
4704 : {
4705 : Assert(!resultRelInfo->ri_needLockTagTuple);
4706 : /* Use the wholerow junk attr as the old tuple. */
4707 179 : ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
4708 : }
4709 : else
4710 : {
4711 : /* Fetch the most recent version of old tuple. */
4712 169641 : Relation relation = resultRelInfo->ri_RelationDesc;
4713 :
     : /* NOTE(review): InplaceUpdateTupleLock presumably serializes
     : * this update against concurrent in-place tuple updates;
     : * confirm against LockTuple's other callers.  The matching
     : * UnlockTuple is below, after ExecUpdate returns. */
4713 169641 : if (resultRelInfo->ri_needLockTagTuple)
4714 : {
4715 14890 : LockTuple(relation, tupleid, InplaceUpdateTupleLock);
4716 14890 : tuplock = true;
4717 : }
4718 169641 : if (!table_tuple_fetch_row_version(relation, tupleid,
4719 : SnapshotAny,
4720 : oldSlot))
4721 0 : elog(ERROR, "failed to fetch tuple being updated");
4722 : }
4723 169820 : slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
4724 : oldSlot);
4725 :
4726 : /* Now apply the update. */
4727 169820 : slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
4728 169820 : oldSlot, slot, node->canSetTag);
4729 169499 : if (tuplock)
4730 14890 : UnlockTuple(resultRelInfo->ri_RelationDesc, tupleid,
4731 : InplaceUpdateTupleLock);
4732 169499 : break;
4733 :
4734 1010657 : case CMD_DELETE:
4735 1010657 : slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
4736 1010657 : true, false, node->canSetTag, NULL, NULL, NULL);
4737 1010620 : break;
4738 :
4739 8515 : case CMD_MERGE:
4740 8515 : slot = ExecMerge(&context, resultRelInfo, tupleid, oldtuple,
4741 8515 : node->canSetTag);
4742 8453 : break;
4743 :
4744 0 : default:
4745 0 : elog(ERROR, "unknown operation");
4746 : break;
4747 : }
4748 :
4749 : /*
4750 : * If we got a RETURNING result, return it to caller. We'll continue
4751 : * the work on next call.
4752 : */
4753 8628003 : if (slot)
4754 5078 : return slot;
4755 : }
4756 :
4757 : /*
4758 : * Insert remaining tuples for batch insert.
4759 : */
4760 70623 : if (estate->es_insert_pending_result_relations != NIL)
4761 13 : ExecPendingInserts(estate);
4762 :
4763 : /*
4764 : * We're done, but fire AFTER STATEMENT triggers before exiting.
4765 : */
4766 70622 : fireASTriggers(node);
4767 :
4768 70622 : node->mt_done = true;
4769 :
4770 70622 : return NULL;
4771 : }
4772 :
4773 : /*
4774 : * ExecLookupResultRelByOid
4775 : * If the table with given OID is among the result relations to be
4776 : * updated by the given ModifyTable node, return its ResultRelInfo.
4777 : *
4778 : * If not found, return NULL if missing_ok, else raise error.
4779 : *
4780 : * If update_cache is true, then upon successful lookup, update the node's
4781 : * one-element cache. ONLY ExecModifyTable may pass true for this.
     : *
     : * Lookup strategy: if mt_resultOidHash is set we probe that hash table,
     : * otherwise we linearly scan the ResultRelInfo array.  NOTE(review):
     : * the hash table is presumably built during node initialization only
     : * when the number of result relations is large enough to warrant it ---
     : * confirm in ExecInitModifyTable.
4782 : */
4783 : ResultRelInfo *
4784 8088 : ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid,
4785 : bool missing_ok, bool update_cache)
4786 : {
4787 8088 : if (node->mt_resultOidHash)
4788 : {
4789 : /* Use the pre-built hash table to locate the rel */
4790 : MTTargetRelLookup *mtlookup;
4791 :
4792 : mtlookup = (MTTargetRelLookup *)
4793 0 : hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
4794 0 : if (mtlookup)
4795 : {
4796 0 : if (update_cache)
4797 : {
4798 0 : node->mt_lastResultOid = resultoid;
4799 0 : node->mt_lastResultIndex = mtlookup->relationIndex;
4800 : }
4801 0 : return node->resultRelInfo + mtlookup->relationIndex;
4802 : }
4803 : }
4804 : else
4805 : {
4806 : /* With few target rels, just search the ResultRelInfo array */
4807 15438 : for (int ndx = 0; ndx < node->mt_nrels; ndx++)
4808 : {
4809 9753 : ResultRelInfo *rInfo = node->resultRelInfo + ndx;
4810 :
4811 9753 : if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
4812 : {
4813 2403 : if (update_cache)
4814 : {
4815 2071 : node->mt_lastResultOid = resultoid;
4816 2071 : node->mt_lastResultIndex = ndx;
4817 : }
4818 2403 : return rInfo;
4819 : }
4820 : }
4821 : }
4822 :
     : /* Not found: caller decides whether that is an error */
4823 5685 : if (!missing_ok)
4824 0 : elog(ERROR, "incorrect result relation OID %u", resultoid);
4825 5685 : return NULL;
4826 : }
4827 :
4828 : /* ----------------------------------------------------------------
4829 : * ExecInitModifyTable
4830 : * ----------------------------------------------------------------
4831 : */
4832 : ModifyTableState *
4833 73668 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
4834 : {
4835 : ModifyTableState *mtstate;
4836 73668 : Plan *subplan = outerPlan(node);
4837 73668 : CmdType operation = node->operation;
4838 73668 : int total_nrels = list_length(node->resultRelations);
4839 : int nrels;
4840 73668 : List *resultRelations = NIL;
4841 73668 : List *withCheckOptionLists = NIL;
4842 73668 : List *returningLists = NIL;
4843 73668 : List *updateColnosLists = NIL;
4844 73668 : List *mergeActionLists = NIL;
4845 73668 : List *mergeJoinConditions = NIL;
4846 : ResultRelInfo *resultRelInfo;
4847 : List *arowmarks;
4848 : ListCell *l;
4849 : int i;
4850 : Relation rel;
4851 :
4852 : /* check for unsupported flags */
4853 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
4854 :
4855 : /*
4856 : * Only consider unpruned relations for initializing their ResultRelInfo
4857 : * struct and other fields such as withCheckOptions, etc.
4858 : *
4859 : * Note: We must avoid pruning every result relation. This is important
4860 : * for MERGE, since even if every result relation is pruned from the
4861 : * subplan, there might still be NOT MATCHED rows, for which there may be
4862 : * INSERT actions to perform. To allow these actions to be found, at
4863 : * least one result relation must be kept. Also, when inserting into a
4864 : * partitioned table, ExecInitPartitionInfo() needs a ResultRelInfo struct
4865 : * as a reference for building the ResultRelInfo of the target partition.
4866 : * In either case, it doesn't matter which result relation is kept, so we
4867 : * just keep the first one, if all others have been pruned. See also,
4868 : * ExecDoInitialPruning(), which ensures that this first result relation
4869 : * has been locked.
4870 : */
4871 73668 : i = 0;
4872 148983 : foreach(l, node->resultRelations)
4873 : {
4874 75315 : Index rti = lfirst_int(l);
4875 : bool keep_rel;
4876 :
4877 75315 : keep_rel = bms_is_member(rti, estate->es_unpruned_relids);
4878 75315 : if (!keep_rel && i == total_nrels - 1 && resultRelations == NIL)
4879 : {
4880 : /* all result relations pruned; keep the first one */
4881 32 : keep_rel = true;
4882 32 : rti = linitial_int(node->resultRelations);
4883 32 : i = 0;
4884 : }
4885 :
4886 75315 : if (keep_rel)
4887 : {
4888 75258 : resultRelations = lappend_int(resultRelations, rti);
4889 75258 : if (node->withCheckOptionLists)
4890 : {
4891 1052 : List *withCheckOptions = list_nth_node(List,
4892 : node->withCheckOptionLists,
4893 : i);
4894 :
4895 1052 : withCheckOptionLists = lappend(withCheckOptionLists, withCheckOptions);
4896 : }
4897 75258 : if (node->returningLists)
4898 : {
4899 3764 : List *returningList = list_nth_node(List,
4900 : node->returningLists,
4901 : i);
4902 :
4903 3764 : returningLists = lappend(returningLists, returningList);
4904 : }
4905 75258 : if (node->updateColnosLists)
4906 : {
4907 9971 : List *updateColnosList = list_nth(node->updateColnosLists, i);
4908 :
4909 9971 : updateColnosLists = lappend(updateColnosLists, updateColnosList);
4910 : }
4911 75258 : if (node->mergeActionLists)
4912 : {
4913 1215 : List *mergeActionList = list_nth(node->mergeActionLists, i);
4914 :
4915 1215 : mergeActionLists = lappend(mergeActionLists, mergeActionList);
4916 : }
4917 75258 : if (node->mergeJoinConditions)
4918 : {
4919 1215 : List *mergeJoinCondition = list_nth(node->mergeJoinConditions, i);
4920 :
4921 1215 : mergeJoinConditions = lappend(mergeJoinConditions, mergeJoinCondition);
4922 : }
4923 : }
4924 75315 : i++;
4925 : }
4926 73668 : nrels = list_length(resultRelations);
4927 : Assert(nrels > 0);
4928 :
4929 : /*
4930 : * create state structure
4931 : */
4932 73668 : mtstate = makeNode(ModifyTableState);
4933 73668 : mtstate->ps.plan = (Plan *) node;
4934 73668 : mtstate->ps.state = estate;
4935 73668 : mtstate->ps.ExecProcNode = ExecModifyTable;
4936 :
4937 73668 : mtstate->operation = operation;
4938 73668 : mtstate->canSetTag = node->canSetTag;
4939 73668 : mtstate->mt_done = false;
4940 :
4941 73668 : mtstate->mt_nrels = nrels;
4942 73668 : mtstate->resultRelInfo = palloc_array(ResultRelInfo, nrels);
4943 :
4944 73668 : mtstate->mt_merge_pending_not_matched = NULL;
4945 73668 : mtstate->mt_merge_inserted = 0;
4946 73668 : mtstate->mt_merge_updated = 0;
4947 73668 : mtstate->mt_merge_deleted = 0;
4948 73668 : mtstate->mt_updateColnosLists = updateColnosLists;
4949 73668 : mtstate->mt_mergeActionLists = mergeActionLists;
4950 73668 : mtstate->mt_mergeJoinConditions = mergeJoinConditions;
4951 :
4952 : /*----------
4953 : * Resolve the target relation. This is the same as:
4954 : *
4955 : * - the relation for which we will fire FOR STATEMENT triggers,
4956 : * - the relation into whose tuple format all captured transition tuples
4957 : * must be converted, and
4958 : * - the root partitioned table used for tuple routing.
4959 : *
4960 : * If it's a partitioned or inherited table, the root partition or
4961 : * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
4962 : * given explicitly in node->rootRelation. Otherwise, the target relation
4963 : * is the sole relation in the node->resultRelations list and, since it can
4964 : * never be pruned, also in the resultRelations list constructed above.
4965 : *----------
4966 : */
4967 73668 : if (node->rootRelation > 0)
4968 : {
4969 : Assert(bms_is_member(node->rootRelation, estate->es_unpruned_relids));
4970 1863 : mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
4971 1863 : ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
4972 : node->rootRelation);
4973 : }
4974 : else
4975 : {
4976 : Assert(list_length(node->resultRelations) == 1);
4977 : Assert(list_length(resultRelations) == 1);
4978 71805 : mtstate->rootResultRelInfo = mtstate->resultRelInfo;
4979 71805 : ExecInitResultRelation(estate, mtstate->resultRelInfo,
4980 71805 : linitial_int(resultRelations));
4981 : }
4982 :
4983 : /* set up epqstate with dummy subplan data for the moment */
4984 73668 : EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
4985 : node->epqParam, resultRelations);
4986 73668 : mtstate->fireBSTriggers = true;
4987 :
4988 : /*
4989 : * Build state for collecting transition tuples. This requires having a
4990 : * valid trigger query context, so skip it in explain-only mode.
4991 : */
4992 73668 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
4993 73005 : ExecSetupTransitionCaptureState(mtstate, estate);
4994 :
4995 : /*
4996 : * Open all the result relations and initialize the ResultRelInfo structs.
4997 : * (But root relation was initialized above, if it's part of the array.)
4998 : * We must do this before initializing the subplan, because direct-modify
4999 : * FDWs expect their ResultRelInfos to be available.
5000 : */
5001 73668 : resultRelInfo = mtstate->resultRelInfo;
5002 73668 : i = 0;
5003 148703 : foreach(l, resultRelations)
5004 : {
5005 75254 : Index resultRelation = lfirst_int(l);
5006 75254 : List *mergeActions = NIL;
5007 :
5008 75254 : if (mergeActionLists)
5009 1215 : mergeActions = list_nth(mergeActionLists, i);
5010 :
5011 75254 : if (resultRelInfo != mtstate->rootResultRelInfo)
5012 : {
5013 3449 : ExecInitResultRelation(estate, resultRelInfo, resultRelation);
5014 :
5015 : /*
5016 : * For child result relations, store the root result relation
5017 : * pointer. We do so for the convenience of places that want to
5018 : * look at the query's original target relation but don't have the
5019 : * mtstate handy.
5020 : */
5021 3449 : resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
5022 : }
5023 :
5024 : /* Initialize the usesFdwDirectModify flag */
5025 75254 : resultRelInfo->ri_usesFdwDirectModify =
5026 75254 : bms_is_member(i, node->fdwDirectModifyPlans);
5027 :
5028 : /*
5029 : * Verify result relation is a valid target for the current operation
5030 : */
5031 75254 : CheckValidResultRel(resultRelInfo, operation, node->onConflictAction,
5032 : mergeActions);
5033 :
5034 75035 : resultRelInfo++;
5035 75035 : i++;
5036 : }
5037 :
5038 : /*
5039 : * Now we may initialize the subplan.
5040 : */
5041 73449 : outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
5042 :
5043 : /*
5044 : * Do additional per-result-relation initialization.
5045 : */
5046 148462 : for (i = 0; i < nrels; i++)
5047 : {
5048 75013 : resultRelInfo = &mtstate->resultRelInfo[i];
5049 :
5050 : /* Let FDWs init themselves for foreign-table result rels */
5051 75013 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5052 74909 : resultRelInfo->ri_FdwRoutine != NULL &&
5053 170 : resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
5054 : {
5055 170 : List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
5056 :
5057 170 : resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
5058 : resultRelInfo,
5059 : fdw_private,
5060 : i,
5061 : eflags);
5062 : }
5063 :
5064 : /*
5065 : * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
5066 : * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
5067 : * tables, the FDW might have created additional junk attr(s), but
5068 : * those are no concern of ours.
5069 : */
5070 75013 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
5071 : operation == CMD_MERGE)
5072 : {
5073 : char relkind;
5074 :
5075 19344 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
5076 19344 : if (relkind == RELKIND_RELATION ||
5077 406 : relkind == RELKIND_MATVIEW ||
5078 : relkind == RELKIND_PARTITIONED_TABLE)
5079 : {
5080 18968 : resultRelInfo->ri_RowIdAttNo =
5081 18968 : ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
5082 :
5083 : /*
5084 : * For heap relations, a ctid junk attribute must be present.
5085 : * Partitioned tables should only appear here when all leaf
5086 : * partitions were pruned, in which case no rows can be
5087 : * produced and ctid is not needed.
5088 : */
5089 18968 : if (relkind == RELKIND_PARTITIONED_TABLE)
5090 : Assert(nrels == 1);
5091 18938 : else if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
5092 0 : elog(ERROR, "could not find junk ctid column");
5093 : }
5094 376 : else if (relkind == RELKIND_FOREIGN_TABLE)
5095 : {
5096 : /*
5097 : * We don't support MERGE with foreign tables for now. (It's
5098 : * problematic because the implementation uses CTID.)
5099 : */
5100 : Assert(operation != CMD_MERGE);
5101 :
5102 : /*
5103 : * When there is a row-level trigger, there should be a
5104 : * wholerow attribute. We also require it to be present in
5105 : * UPDATE and MERGE, so we can get the values of unchanged
5106 : * columns.
5107 : */
5108 186 : resultRelInfo->ri_RowIdAttNo =
5109 186 : ExecFindJunkAttributeInTlist(subplan->targetlist,
5110 : "wholerow");
5111 186 : if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
5112 105 : !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
5113 0 : elog(ERROR, "could not find junk wholerow column");
5114 : }
5115 : else
5116 : {
5117 : /* Other valid target relkinds must provide wholerow */
5118 190 : resultRelInfo->ri_RowIdAttNo =
5119 190 : ExecFindJunkAttributeInTlist(subplan->targetlist,
5120 : "wholerow");
5121 190 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
5122 0 : elog(ERROR, "could not find junk wholerow column");
5123 : }
5124 : }
5125 : }
5126 :
5127 : /*
5128 : * If this is an inherited update/delete/merge, there will be a junk
5129 : * attribute named "tableoid" present in the subplan's targetlist. It
5130 : * will be used to identify the result relation for a given tuple to be
5131 : * updated/deleted/merged.
5132 : */
5133 73449 : mtstate->mt_resultOidAttno =
5134 73449 : ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
5135 : Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || total_nrels == 1);
5136 73449 : mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
5137 73449 : mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
5138 :
5139 : /* Get the root target relation */
5140 73449 : rel = mtstate->rootResultRelInfo->ri_RelationDesc;
5141 :
5142 : /*
5143 : * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
5144 : * or MERGE might need this too, but only if it actually moves tuples
5145 : * between partitions; in that case setup is done by
5146 : * ExecCrossPartitionUpdate.
5147 : */
5148 73449 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
5149 : operation == CMD_INSERT)
5150 3882 : mtstate->mt_partition_tuple_routing =
5151 3882 : ExecSetupPartitionTupleRouting(estate, rel);
5152 :
5153 : /*
5154 : * Initialize any WITH CHECK OPTION constraints if needed.
5155 : */
5156 73449 : resultRelInfo = mtstate->resultRelInfo;
5157 74501 : foreach(l, withCheckOptionLists)
5158 : {
5159 1052 : List *wcoList = (List *) lfirst(l);
5160 1052 : List *wcoExprs = NIL;
5161 : ListCell *ll;
5162 :
5163 3115 : foreach(ll, wcoList)
5164 : {
5165 2063 : WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
5166 2063 : ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
5167 : &mtstate->ps);
5168 :
5169 2063 : wcoExprs = lappend(wcoExprs, wcoExpr);
5170 : }
5171 :
5172 1052 : resultRelInfo->ri_WithCheckOptions = wcoList;
5173 1052 : resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
5174 1052 : resultRelInfo++;
5175 : }
5176 :
5177 : /*
5178 : * Initialize RETURNING projections if needed.
5179 : */
5180 73449 : if (returningLists)
5181 : {
5182 : TupleTableSlot *slot;
5183 : ExprContext *econtext;
5184 :
5185 : /*
5186 : * Initialize result tuple slot and assign its rowtype using the plan
5187 : * node's declared targetlist, which the planner set up to be the same
5188 : * as the first (before runtime pruning) RETURNING list. We assume
5189 : * all the result rels will produce compatible output.
5190 : */
5191 3549 : ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
5192 3549 : slot = mtstate->ps.ps_ResultTupleSlot;
5193 :
5194 : /* Need an econtext too */
5195 3549 : if (mtstate->ps.ps_ExprContext == NULL)
5196 3549 : ExecAssignExprContext(estate, &mtstate->ps);
5197 3549 : econtext = mtstate->ps.ps_ExprContext;
5198 :
5199 : /*
5200 : * Build a projection for each result rel.
5201 : */
5202 3549 : resultRelInfo = mtstate->resultRelInfo;
5203 7313 : foreach(l, returningLists)
5204 : {
5205 3764 : List *rlist = (List *) lfirst(l);
5206 :
5207 3764 : resultRelInfo->ri_returningList = rlist;
5208 3764 : resultRelInfo->ri_projectReturning =
5209 3764 : ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
5210 3764 : resultRelInfo->ri_RelationDesc->rd_att);
5211 3764 : resultRelInfo++;
5212 : }
5213 : }
5214 : else
5215 : {
5216 : /*
5217 : * We still must construct a dummy result tuple type, because InitPlan
5218 : * expects one (maybe should change that?).
5219 : */
5220 69900 : ExecInitResultTypeTL(&mtstate->ps);
5221 :
5222 69900 : mtstate->ps.ps_ExprContext = NULL;
5223 : }
5224 :
5225 : /* Set the list of arbiter indexes if needed for ON CONFLICT */
5226 73449 : resultRelInfo = mtstate->resultRelInfo;
5227 73449 : if (node->onConflictAction != ONCONFLICT_NONE)
5228 : {
5229 : /* insert may only have one relation, inheritance is not expanded */
5230 : Assert(total_nrels == 1);
5231 1172 : resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
5232 : }
5233 :
5234 : /*
5235 : * For ON CONFLICT DO SELECT/UPDATE, initialize the ON CONFLICT action
5236 : * state.
5237 : */
5238 73449 : if (node->onConflictAction == ONCONFLICT_UPDATE ||
5239 72801 : node->onConflictAction == ONCONFLICT_SELECT)
5240 : {
5241 868 : OnConflictActionState *onconfl = makeNode(OnConflictActionState);
5242 :
5243 : /* already exists if created by RETURNING processing above */
5244 868 : if (mtstate->ps.ps_ExprContext == NULL)
5245 452 : ExecAssignExprContext(estate, &mtstate->ps);
5246 :
5247 : /* action state for DO SELECT/UPDATE */
5248 868 : resultRelInfo->ri_onConflict = onconfl;
5249 :
5250 : /* lock strength for DO SELECT [FOR UPDATE/SHARE] */
5251 868 : onconfl->oc_LockStrength = node->onConflictLockStrength;
5252 :
5253 : /* initialize slot for the existing tuple */
5254 868 : onconfl->oc_Existing =
5255 868 : table_slot_create(resultRelInfo->ri_RelationDesc,
5256 868 : &mtstate->ps.state->es_tupleTable);
5257 :
5258 : /*
5259 : * For ON CONFLICT DO UPDATE, initialize target list and projection.
5260 : */
5261 868 : if (node->onConflictAction == ONCONFLICT_UPDATE)
5262 : {
5263 : ExprContext *econtext;
5264 : TupleDesc relationDesc;
5265 :
5266 648 : econtext = mtstate->ps.ps_ExprContext;
5267 648 : relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
5268 :
5269 : /*
5270 : * Create the tuple slot for the UPDATE SET projection. We want a
5271 : * slot of the table's type here, because the slot will be used to
5272 : * insert into the table, and for RETURNING processing - which may
5273 : * access system attributes.
5274 : */
5275 648 : onconfl->oc_ProjSlot =
5276 648 : table_slot_create(resultRelInfo->ri_RelationDesc,
5277 648 : &mtstate->ps.state->es_tupleTable);
5278 :
5279 : /* build UPDATE SET projection state */
5280 648 : onconfl->oc_ProjInfo =
5281 648 : ExecBuildUpdateProjection(node->onConflictSet,
5282 : true,
5283 : node->onConflictCols,
5284 : relationDesc,
5285 : econtext,
5286 : onconfl->oc_ProjSlot,
5287 : &mtstate->ps);
5288 : }
5289 :
5290 : /* initialize state to evaluate the WHERE clause, if any */
5291 868 : if (node->onConflictWhere)
5292 : {
5293 : ExprState *qualexpr;
5294 :
5295 203 : qualexpr = ExecInitQual((List *) node->onConflictWhere,
5296 : &mtstate->ps);
5297 203 : onconfl->oc_WhereClause = qualexpr;
5298 : }
5299 : }
5300 :
5301 : /*
5302 : * If we have any secondary relations in an UPDATE or DELETE, they need to
5303 : * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
5304 : * EvalPlanQual mechanism needs to be told about them. This also goes for
5305 : * the source relations in a MERGE. Locate the relevant ExecRowMarks.
5306 : */
5307 73449 : arowmarks = NIL;
5308 75317 : foreach(l, node->rowMarks)
5309 : {
5310 1868 : PlanRowMark *rc = lfirst_node(PlanRowMark, l);
5311 1868 : RangeTblEntry *rte = exec_rt_fetch(rc->rti, estate);
5312 : ExecRowMark *erm;
5313 : ExecAuxRowMark *aerm;
5314 :
5315 : /* ignore "parent" rowmarks; they are irrelevant at runtime */
5316 1868 : if (rc->isParent)
5317 94 : continue;
5318 :
5319 : /*
5320 : * Also ignore rowmarks belonging to child tables that have been
5321 : * pruned in ExecDoInitialPruning().
5322 : */
5323 1774 : if (rte->rtekind == RTE_RELATION &&
5324 1406 : !bms_is_member(rc->rti, estate->es_unpruned_relids))
5325 0 : continue;
5326 :
5327 : /* Find ExecRowMark and build ExecAuxRowMark */
5328 1774 : erm = ExecFindRowMark(estate, rc->rti, false);
5329 1774 : aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
5330 1774 : arowmarks = lappend(arowmarks, aerm);
5331 : }
5332 :
5333 : /* For a MERGE command, initialize its state */
5334 73449 : if (mtstate->operation == CMD_MERGE)
5335 1051 : ExecInitMerge(mtstate, estate);
5336 :
5337 73449 : EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
5338 :
5339 : /*
5340 : * If there are a lot of result relations, use a hash table to speed the
5341 : * lookups. If there are not a lot, a simple linear search is faster.
5342 : *
5343 : * It's not clear where the threshold is, but try 64 for starters. In a
5344 : * debugging build, use a small threshold so that we get some test
5345 : * coverage of both code paths.
5346 : */
5347 : #ifdef USE_ASSERT_CHECKING
5348 : #define MT_NRELS_HASH 4
5349 : #else
5350 : #define MT_NRELS_HASH 64
5351 : #endif
5352 73449 : if (nrels >= MT_NRELS_HASH)
5353 : {
5354 : HASHCTL hash_ctl;
5355 :
5356 0 : hash_ctl.keysize = sizeof(Oid);
5357 0 : hash_ctl.entrysize = sizeof(MTTargetRelLookup);
5358 0 : hash_ctl.hcxt = CurrentMemoryContext;
5359 0 : mtstate->mt_resultOidHash =
5360 0 : hash_create("ModifyTable target hash",
5361 : nrels, &hash_ctl,
5362 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
5363 0 : for (i = 0; i < nrels; i++)
5364 : {
5365 : Oid hashkey;
5366 : MTTargetRelLookup *mtlookup;
5367 : bool found;
5368 :
5369 0 : resultRelInfo = &mtstate->resultRelInfo[i];
5370 0 : hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
5371 : mtlookup = (MTTargetRelLookup *)
5372 0 : hash_search(mtstate->mt_resultOidHash, &hashkey,
5373 : HASH_ENTER, &found);
5374 : Assert(!found);
5375 0 : mtlookup->relationIndex = i;
5376 : }
5377 : }
5378 : else
5379 73449 : mtstate->mt_resultOidHash = NULL;
5380 :
5381 : /*
5382 : * Determine if the FDW supports batch insert and determine the batch size
5383 : * (a FDW may support batching, but it may be disabled for the
5384 : * server/table).
5385 : *
5386 : * We only do this for INSERT, so that for UPDATE/DELETE the batch size
5387 : * remains set to 0.
5388 : */
5389 73449 : if (operation == CMD_INSERT)
5390 : {
5391 : /* insert may only have one relation, inheritance is not expanded */
5392 : Assert(total_nrels == 1);
5393 55669 : resultRelInfo = mtstate->resultRelInfo;
5394 55669 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5395 55669 : resultRelInfo->ri_FdwRoutine != NULL &&
5396 88 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
5397 88 : resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
5398 : {
5399 88 : resultRelInfo->ri_BatchSize =
5400 88 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
5401 88 : Assert(resultRelInfo->ri_BatchSize >= 1);
5402 : }
5403 : else
5404 55581 : resultRelInfo->ri_BatchSize = 1;
5405 : }
5406 :
5407 : /*
5408 : * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
5409 : * to estate->es_auxmodifytables so that it will be run to completion by
5410 : * ExecPostprocessPlan. (It'd actually work fine to add the primary
5411 : * ModifyTable node too, but there's no need.) Note the use of lcons not
5412 : * lappend: we need later-initialized ModifyTable nodes to be shut down
5413 : * before earlier ones. This ensures that we don't throw away RETURNING
5414 : * rows that need to be seen by a later CTE subplan.
5415 : */
5416 73449 : if (!mtstate->canSetTag)
5417 644 : estate->es_auxmodifytables = lcons(mtstate,
5418 : estate->es_auxmodifytables);
5419 :
5420 73449 : return mtstate;
5421 : }
5422 :
5423             : /* ----------------------------------------------------------------
5424             :  *		ExecEndModifyTable
5425             :  *
5426             :  *		Shuts down the plan.
5427             :  *
5428             :  *		Releases all per-node resources: FDW modify state, any FDW
5429             :  *		batching slots, partition tuple-routing state, EPQ state, and
5430             :  *		finally the subplan that supplied the source tuples.
5431             :  *
5432             :  *		Returns nothing of interest.
5433             :  * ----------------------------------------------------------------
5434             :  */
5435             : void
5436       70554 : ExecEndModifyTable(ModifyTableState *node)
5437             : {
5438             : 	int			i;
5439             : 
5440             : 	/*
5441             : 	 * Allow any FDWs to shut down.  Relations flagged with
5442             : 	 * ri_usesFdwDirectModify are skipped here, as are rels whose FDW
5443             : 	 * provides no EndForeignModify callback.
5444             : 	 */
5445      142472 : 	for (i = 0; i < node->mt_nrels; i++)
5446             : 	{
5447             : 		int			j;
5448       71918 : 		ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
5449             : 
5450       71918 : 		if (!resultRelInfo->ri_usesFdwDirectModify &&
5451       71822 : 			resultRelInfo->ri_FdwRoutine != NULL &&
5452         156 : 			resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
5453         156 : 			resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
5454             : 														   resultRelInfo);
5455             : 
5456             : 		/*
5457             : 		 * Cleanup the initialized batch slots.  This only matters for FDWs
5458             : 		 * with batching, but the other cases will have ri_NumSlotsInitialized
5459             : 		 * == 0.
5460             : 		 */
5461       71946 : 		for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
5462             : 		{
5463          28 : 			ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
5464          28 : 			ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
5465             : 		}
5466             : 	}
5467             : 
5468             : 	/*
5469             : 	 * Close all the partitioned tables, leaf partitions, and their indices
5470             : 	 * and release the slot used for tuple routing, if set.  (The routing
5471             : 	 * state exists only for partitioned-table INSERT or a cross-partition
5472             : 	 * UPDATE/MERGE that actually moved tuples.)
5473             : 	 */
5474       70554 : 	if (node->mt_partition_tuple_routing)
5475             : 	{
5476        3902 : 		ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
5477             : 
5478        3902 : 		if (node->mt_root_tuple_slot)
5479         425 : 			ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
5480             : 	}
5481             : 
5482             : 	/*
5483             : 	 * Terminate EPQ execution if active
5484             : 	 */
5485       70554 : 	EvalPlanQualEnd(&node->mt_epqstate);
5486             : 
5487             : 	/*
5488             : 	 * shut down subplan (the source of tuples to modify)
5489             : 	 */
5490       70554 : 	ExecEndNode(outerPlanState(node));
5491       70554 : }
5484 :
5485             : /* ----------------------------------------------------------------
5486             :  *		ExecReScanModifyTable
5487             :  *
5488             :  *		Rescan is deliberately unsupported for ModifyTable; reaching
5489             :  *		here indicates a planner or executor bug, so raise an error.
5490             :  * ----------------------------------------------------------------
5491             :  */
5492             : void
5493           0 : ExecReScanModifyTable(ModifyTableState *node)
5494             : {
5495             : 	/*
5496             : 	 * Currently, we don't need to support rescan on ModifyTable nodes. The
5497             : 	 * semantics of that would be a bit debatable anyway.
5498             : 	 */
5499           0 : 	elog(ERROR, "ExecReScanModifyTable is not implemented");
5500             : }
|