1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeModifyTable.c
4 : * routines to handle ModifyTable nodes.
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/executor/nodeModifyTable.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : /* INTERFACE ROUTINES
16 : * ExecInitModifyTable - initialize the ModifyTable node
17 : * ExecModifyTable - retrieve the next tuple from the node
18 : * ExecEndModifyTable - shut down the ModifyTable node
19 : * ExecReScanModifyTable - rescan the ModifyTable node
20 : *
21 : * NOTES
22 : * The ModifyTable node receives input from its outerPlan, which is
23 : * the data to insert for INSERT cases, the changed columns' new
24 : * values plus row-locating info for UPDATE and MERGE cases, or just the
25 : * row-locating info for DELETE cases.
26 : *
27 : * The relation to modify can be an ordinary table, a foreign table, or a
28 : * view. If it's a view, either it has sufficient INSTEAD OF triggers or
29 : * this node executes only MERGE ... DO NOTHING. If the original MERGE
30 : * targeted a view not in one of those two categories, earlier processing
31 : * already pointed the ModifyTable result relation to an underlying
32 : * relation of that other view. This node does process
33 : * ri_WithCheckOptions, which may have expressions from those other,
34 : * automatically updatable views.
35 : *
36 : * MERGE runs a join between the source relation and the target table.
37 : * If any WHEN NOT MATCHED [BY TARGET] clauses are present, then the join
38 : * is an outer join that might output tuples without a matching target
39 : * tuple. In this case, any unmatched target tuples will have NULL
40 : * row-locating info, and only INSERT can be run. But for matched target
41 : * tuples, the row-locating info is used to determine the tuple to UPDATE
42 : * or DELETE. When all clauses are WHEN MATCHED or WHEN NOT MATCHED BY
43 : * SOURCE, all tuples produced by the join will include a matching target
44 : * tuple, so all tuples contain row-locating info.
45 : *
46 : * If the query specifies RETURNING, then the ModifyTable returns a
47 : * RETURNING tuple after completing each row insert, update, or delete.
48 : * It must be called again to continue the operation. Without RETURNING,
49 : * we just loop within the node until all the work is done, then
50 : * return NULL. This avoids useless call/return overhead.
51 : */
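To illustrate the call contract described in the NOTES above, here is a minimal standalone sketch (invented names, not executor code): with RETURNING the node hands back one tuple per modified row and must be called again, while without RETURNING it does all the work in a single call and returns NULL.

#include <stdio.h>

typedef struct FakeModifyNode
{
    int         remaining;      /* rows still to modify */
    int         has_returning;  /* does the query have RETURNING? */
} FakeModifyNode;

/*
 * With RETURNING, return one "tuple" per modified row and NULL when done;
 * without it, do all the work in one call and return NULL.
 */
static const char *
exec_fake_modify(FakeModifyNode *node)
{
    if (!node->has_returning)
    {
        while (node->remaining > 0)
            node->remaining--;  /* modify rows, return nothing */
        return NULL;
    }
    if (node->remaining > 0)
    {
        node->remaining--;
        return "returning-tuple";
    }
    return NULL;
}

int
main(void)
{
    FakeModifyNode node = {3, 1};
    const char *tuple;

    while ((tuple = exec_fake_modify(&node)) != NULL)
        printf("got %s\n", tuple);
    return 0;
}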
52 :
53 : #include "postgres.h"
54 :
55 : #include "access/htup_details.h"
56 : #include "access/tableam.h"
57 : #include "access/xact.h"
58 : #include "commands/trigger.h"
59 : #include "executor/execPartition.h"
60 : #include "executor/executor.h"
61 : #include "executor/nodeModifyTable.h"
62 : #include "foreign/fdwapi.h"
63 : #include "miscadmin.h"
64 : #include "nodes/nodeFuncs.h"
65 : #include "optimizer/optimizer.h"
66 : #include "rewrite/rewriteHandler.h"
67 : #include "storage/lmgr.h"
68 : #include "utils/builtins.h"
69 : #include "utils/datum.h"
70 : #include "utils/rel.h"
71 : #include "utils/snapmgr.h"
72 :
73 :
74 : typedef struct MTTargetRelLookup
75 : {
76 : Oid relationOid; /* hash key, must be first */
77 : int relationIndex; /* rel's index in resultRelInfo[] array */
78 : } MTTargetRelLookup;
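The "must be first" comment on relationOid matters because generic hash tables (such as the executor's dynahash-based ones) locate the key at the start of each entry. A minimal standalone sketch of that convention, with invented stand-in names:

#include <stdio.h>
#include <string.h>

typedef unsigned int FakeOid;

typedef struct FakeTargetRelLookup
{
    FakeOid     relationOid;    /* key: must be the first field */
    int         relationIndex;
} FakeTargetRelLookup;

/*
 * A generic hash table knows only the entry pointer and the key size, so it
 * hashes and compares the first keysize bytes of each entry.
 */
static int
entry_matches(const void *entry, const void *key, size_t keysize)
{
    return memcmp(entry, key, keysize) == 0;
}

int
main(void)
{
    FakeTargetRelLookup entry = {16384, 2};
    FakeOid     key = 16384;

    if (entry_matches(&entry, &key, sizeof(FakeOid)))
        printf("rel %u -> resultRelInfo[%d]\n",
               entry.relationOid, entry.relationIndex);
    return 0;
}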
79 :
80 : /*
81 : * Context struct for a ModifyTable operation, containing basic execution
82 : * state and some output variables populated by ExecUpdateAct() and
83 : * ExecDeleteAct() to report the result of their actions to callers.
84 : */
85 : typedef struct ModifyTableContext
86 : {
87 : /* Operation state */
88 : ModifyTableState *mtstate;
89 : EPQState *epqstate;
90 : EState *estate;
91 :
92 : /*
93 : * Slot containing tuple obtained from ModifyTable's subplan. Used to
94 : * access "junk" columns that are not going to be stored.
95 : */
96 : TupleTableSlot *planSlot;
97 :
98 : /*
99 : * Information about the changes that were made concurrently to a tuple
100 : * being updated or deleted
101 : */
102 : TM_FailureData tmfd;
103 :
104 : /*
105 : * The tuple deleted when doing a cross-partition UPDATE with a RETURNING
106 : * clause that refers to OLD columns (converted to the root's tuple
107 : * descriptor).
108 : */
109 : TupleTableSlot *cpDeletedSlot;
110 :
111 : /*
112 : * The tuple projected by the INSERT's RETURNING clause, when doing a
113 : * cross-partition UPDATE
114 : */
115 : TupleTableSlot *cpUpdateReturningSlot;
116 : } ModifyTableContext;
117 :
118 : /*
119 : * Context struct containing output data specific to UPDATE operations.
120 : */
121 : typedef struct UpdateContext
122 : {
123 : bool crossPartUpdate; /* was it a cross-partition update? */
124 : TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
125 :
126 : /*
127 : * Lock mode to acquire on the latest tuple version before performing
128 : * EvalPlanQual on it
129 : */
130 : LockTupleMode lockmode;
131 : } UpdateContext;
132 :
133 :
134 : static void ExecBatchInsert(ModifyTableState *mtstate,
135 : ResultRelInfo *resultRelInfo,
136 : TupleTableSlot **slots,
137 : TupleTableSlot **planSlots,
138 : int numSlots,
139 : EState *estate,
140 : bool canSetTag);
141 : static void ExecPendingInserts(EState *estate);
142 : static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
143 : ResultRelInfo *sourcePartInfo,
144 : ResultRelInfo *destPartInfo,
145 : ItemPointer tupleid,
146 : TupleTableSlot *oldslot,
147 : TupleTableSlot *newslot);
148 : static bool ExecOnConflictUpdate(ModifyTableContext *context,
149 : ResultRelInfo *resultRelInfo,
150 : ItemPointer conflictTid,
151 : TupleTableSlot *excludedSlot,
152 : bool canSetTag,
153 : TupleTableSlot **returning);
154 : static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
155 : EState *estate,
156 : PartitionTupleRouting *proute,
157 : ResultRelInfo *targetRelInfo,
158 : TupleTableSlot *slot,
159 : ResultRelInfo **partRelInfo);
160 :
161 : static TupleTableSlot *ExecMerge(ModifyTableContext *context,
162 : ResultRelInfo *resultRelInfo,
163 : ItemPointer tupleid,
164 : HeapTuple oldtuple,
165 : bool canSetTag);
166 : static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
167 : static TupleTableSlot *ExecMergeMatched(ModifyTableContext *context,
168 : ResultRelInfo *resultRelInfo,
169 : ItemPointer tupleid,
170 : HeapTuple oldtuple,
171 : bool canSetTag,
172 : bool *matched);
173 : static TupleTableSlot *ExecMergeNotMatched(ModifyTableContext *context,
174 : ResultRelInfo *resultRelInfo,
175 : bool canSetTag);
176 :
177 :
178 : /*
179 : * Verify that the tuples to be produced by INSERT match the
180 : * target relation's rowtype
181 : *
182 : * We do this to guard against stale plans. If plan invalidation is
183 : * functioning properly then we should never get a failure here, but better
184 : * safe than sorry. Note that this is called after we have obtained lock
185 : * on the target rel, so the rowtype can't change underneath us.
186 : *
187 : * The plan output is represented by its targetlist, because that makes
188 : * handling the dropped-column case easier.
189 : *
190 : * We used to use this for UPDATE as well, but now the equivalent checks
191 : * are done in ExecBuildUpdateProjection.
192 : */
193 : static void
194 92960 : ExecCheckPlanOutput(Relation resultRel, List *targetList)
195 : {
196 92960 : TupleDesc resultDesc = RelationGetDescr(resultRel);
197 92960 : int attno = 0;
198 : ListCell *lc;
199 :
200 285664 : foreach(lc, targetList)
201 : {
202 192704 : TargetEntry *tle = (TargetEntry *) lfirst(lc);
203 : Form_pg_attribute attr;
204 :
205 : Assert(!tle->resjunk); /* caller removed junk items already */
206 :
207 192704 : if (attno >= resultDesc->natts)
208 0 : ereport(ERROR,
209 : (errcode(ERRCODE_DATATYPE_MISMATCH),
210 : errmsg("table row type and query-specified row type do not match"),
211 : errdetail("Query has too many columns.")));
212 192704 : attr = TupleDescAttr(resultDesc, attno);
213 192704 : attno++;
214 :
215 : /*
216 : * Special cases here should match planner's expand_insert_targetlist.
217 : */
218 192704 : if (attr->attisdropped)
219 : {
220 : /*
221 : * For a dropped column, we can't check atttypid (it's likely 0).
222 : * In any case the planner has most likely inserted an INT4 null.
223 : * What we insist on is just *some* NULL constant.
224 : */
225 622 : if (!IsA(tle->expr, Const) ||
226 622 : !((Const *) tle->expr)->constisnull)
227 0 : ereport(ERROR,
228 : (errcode(ERRCODE_DATATYPE_MISMATCH),
229 : errmsg("table row type and query-specified row type do not match"),
230 : errdetail("Query provides a value for a dropped column at ordinal position %d.",
231 : attno)));
232 : }
233 192082 : else if (attr->attgenerated)
234 : {
235 : /*
236 : * For a generated column, the planner will have inserted a null
237 : * of the column's base type (to avoid possibly failing on domain
238 : * not-null constraints). It doesn't seem worth insisting on that
239 : * exact type though, since a null value is type-independent. As
240 : * above, just insist on *some* NULL constant.
241 : */
242 1122 : if (!IsA(tle->expr, Const) ||
243 1122 : !((Const *) tle->expr)->constisnull)
244 0 : ereport(ERROR,
245 : (errcode(ERRCODE_DATATYPE_MISMATCH),
246 : errmsg("table row type and query-specified row type do not match"),
247 : errdetail("Query provides a value for a generated column at ordinal position %d.",
248 : attno)));
249 : }
250 : else
251 : {
252 : /* Normal case: demand type match */
253 190960 : if (exprType((Node *) tle->expr) != attr->atttypid)
254 0 : ereport(ERROR,
255 : (errcode(ERRCODE_DATATYPE_MISMATCH),
256 : errmsg("table row type and query-specified row type do not match"),
257 : errdetail("Table has type %s at ordinal position %d, but query expects %s.",
258 : format_type_be(attr->atttypid),
259 : attno,
260 : format_type_be(exprType((Node *) tle->expr)))));
261 : }
262 : }
263 92960 : if (attno != resultDesc->natts)
264 0 : ereport(ERROR,
265 : (errcode(ERRCODE_DATATYPE_MISMATCH),
266 : errmsg("table row type and query-specified row type do not match"),
267 : errdetail("Query has too few columns.")));
268 92960 : }
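A condensed standalone sketch of the checks above, using invented stand-in types: extra or missing columns are errors, dropped and generated columns must receive a NULL constant, and ordinary columns must match types exactly.

#include <stdio.h>

typedef enum { COL_NORMAL, COL_DROPPED, COL_GENERATED } FakeColKind;

typedef struct FakeCol
{
    FakeColKind kind;
    int         typid;
} FakeCol;

typedef struct FakeTle
{
    int         typid;
    int         is_null_const;
} FakeTle;

static const char *
check_plan_output(const FakeCol *cols, int ncols,
                  const FakeTle *tles, int ntles)
{
    if (ntles > ncols)
        return "query has too many columns";
    for (int i = 0; i < ntles; i++)
    {
        if (cols[i].kind != COL_NORMAL)
        {
            /* dropped/generated columns must get *some* NULL constant */
            if (!tles[i].is_null_const)
                return "value provided for dropped or generated column";
        }
        else if (tles[i].typid != cols[i].typid)
            return "table/query type mismatch";
    }
    if (ntles < ncols)
        return "query has too few columns";
    return NULL;                /* plan output matches the rowtype */
}

int
main(void)
{
    FakeCol     cols[] = {{COL_NORMAL, 23}, {COL_DROPPED, 0}, {COL_GENERATED, 23}};
    FakeTle     good[] = {{23, 0}, {0, 1}, {0, 1}};
    FakeTle     bad[] = {{25, 0}, {0, 1}, {0, 1}};

    printf("%s\n", check_plan_output(cols, 3, good, 3) ? "unexpected" : "ok");
    printf("%s\n", check_plan_output(cols, 3, bad, 3));
    return 0;
}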
269 :
270 : /*
271 : * ExecProcessReturning --- evaluate a RETURNING list
272 : *
273 : * context: context for the ModifyTable operation
274 : * resultRelInfo: current result rel
275 : * cmdType: operation/merge action performed (INSERT, UPDATE, or DELETE)
276 : * oldSlot: slot holding old tuple deleted or updated
277 : * newSlot: slot holding new tuple inserted or updated
278 : * planSlot: slot holding tuple returned by top subplan node
279 : *
280 : * Note: If oldSlot and newSlot are NULL, the FDW should have already provided
281 : * econtext's scan tuple, and its old & new tuples are not needed (FDW direct-
282 : * modify is disabled if the RETURNING list refers to any OLD/NEW values).
283 : *
284 : * Returns a slot holding the result tuple
285 : */
286 : static TupleTableSlot *
287 8012 : ExecProcessReturning(ModifyTableContext *context,
288 : ResultRelInfo *resultRelInfo,
289 : CmdType cmdType,
290 : TupleTableSlot *oldSlot,
291 : TupleTableSlot *newSlot,
292 : TupleTableSlot *planSlot)
293 : {
294 8012 : EState *estate = context->estate;
295 8012 : ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
296 8012 : ExprContext *econtext = projectReturning->pi_exprContext;
297 :
298 : /* Make tuple and any needed join variables available to ExecProject */
299 8012 : switch (cmdType)
300 : {
301 6600 : case CMD_INSERT:
302 : case CMD_UPDATE:
303 : /* return new tuple by default */
304 6600 : if (newSlot)
305 6144 : econtext->ecxt_scantuple = newSlot;
306 6600 : break;
307 :
308 1412 : case CMD_DELETE:
309 : /* return old tuple by default */
310 1412 : if (oldSlot)
311 1174 : econtext->ecxt_scantuple = oldSlot;
312 1412 : break;
313 :
314 0 : default:
315 0 : elog(ERROR, "unrecognized commandType: %d", (int) cmdType);
316 : }
317 8012 : econtext->ecxt_outertuple = planSlot;
318 :
319 : /* Make old/new tuples available to ExecProject, if required */
320 8012 : if (oldSlot)
321 3782 : econtext->ecxt_oldtuple = oldSlot;
322 4230 : else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
323 180 : econtext->ecxt_oldtuple = ExecGetAllNullSlot(estate, resultRelInfo);
324 : else
325 4050 : econtext->ecxt_oldtuple = NULL; /* No references to OLD columns */
326 :
327 8012 : if (newSlot)
328 6144 : econtext->ecxt_newtuple = newSlot;
329 1868 : else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW)
330 132 : econtext->ecxt_newtuple = ExecGetAllNullSlot(estate, resultRelInfo);
331 : else
332 1736 : econtext->ecxt_newtuple = NULL; /* No references to NEW columns */
333 :
334 : /*
335 : * Tell ExecProject whether or not the OLD/NEW rows actually exist. This
336 : * information is required to evaluate ReturningExpr nodes and also in
337 : * ExecEvalSysVar() and ExecEvalWholeRowVar().
338 : */
339 8012 : if (oldSlot == NULL)
340 4230 : projectReturning->pi_state.flags |= EEO_FLAG_OLD_IS_NULL;
341 : else
342 3782 : projectReturning->pi_state.flags &= ~EEO_FLAG_OLD_IS_NULL;
343 :
344 8012 : if (newSlot == NULL)
345 1868 : projectReturning->pi_state.flags |= EEO_FLAG_NEW_IS_NULL;
346 : else
347 6144 : projectReturning->pi_state.flags &= ~EEO_FLAG_NEW_IS_NULL;
348 :
349 : /* Compute the RETURNING expressions */
350 8012 : return ExecProject(projectReturning);
351 : }
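Note that the function both sets and clears EEO_FLAG_OLD_IS_NULL / EEO_FLAG_NEW_IS_NULL because the ProjectionInfo is reused from row to row. A tiny standalone sketch of that set-or-clear discipline (flag names invented):

#include <stdio.h>

#define FLAG_OLD_IS_NULL    0x1
#define FLAG_NEW_IS_NULL    0x2

static unsigned
update_row_flags(unsigned flags, int have_old, int have_new)
{
    if (have_old)
        flags &= ~FLAG_OLD_IS_NULL;
    else
        flags |= FLAG_OLD_IS_NULL;
    if (have_new)
        flags &= ~FLAG_NEW_IS_NULL;
    else
        flags |= FLAG_NEW_IS_NULL;
    return flags;
}

int
main(void)
{
    unsigned    flags = 0;

    flags = update_row_flags(flags, 0, 1);  /* INSERT: no OLD row */
    printf("after INSERT: %#x\n", flags);
    flags = update_row_flags(flags, 1, 1);  /* UPDATE: both rows exist */
    printf("after UPDATE: %#x\n", flags);   /* stale OLD bit was cleared */
    return 0;
}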
352 :
353 : /*
354 : * ExecCheckTupleVisible -- verify tuple is visible
355 : *
356 : * It would not be consistent with guarantees of the higher isolation levels to
357 : * proceed with avoiding insertion (taking speculative insertion's alternative
358 : * path) on the basis of another tuple that is not visible to the MVCC snapshot.
359 : * Check for the need to raise a serialization failure, and do so as necessary.
360 : */
361 : static void
362 5246 : ExecCheckTupleVisible(EState *estate,
363 : Relation rel,
364 : TupleTableSlot *slot)
365 : {
366 5246 : if (!IsolationUsesXactSnapshot())
367 5182 : return;
368 :
369 64 : if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
370 : {
371 : Datum xminDatum;
372 : TransactionId xmin;
373 : bool isnull;
374 :
375 40 : xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
376 : Assert(!isnull);
377 40 : xmin = DatumGetTransactionId(xminDatum);
378 :
379 : /*
380 : * We should not raise a serialization failure if the conflict is
381 : * against a tuple inserted by our own transaction, even if it's not
382 : * visible to our snapshot. (This would happen, for example, if
383 : * conflicting keys are proposed for insertion in a single command.)
384 : */
385 40 : if (!TransactionIdIsCurrentTransactionId(xmin))
386 20 : ereport(ERROR,
387 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
388 : errmsg("could not serialize access due to concurrent update")));
389 : }
390 : }
391 :
392 : /*
393 : * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
394 : */
395 : static void
396 212 : ExecCheckTIDVisible(EState *estate,
397 : ResultRelInfo *relinfo,
398 : ItemPointer tid,
399 : TupleTableSlot *tempSlot)
400 : {
401 212 : Relation rel = relinfo->ri_RelationDesc;
402 :
403 : /* Redundantly check isolation level */
404 212 : if (!IsolationUsesXactSnapshot())
405 148 : return;
406 :
407 64 : if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
408 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
409 64 : ExecCheckTupleVisible(estate, rel, tempSlot);
410 44 : ExecClearTuple(tempSlot);
411 : }
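The decision table implemented by ExecCheckTupleVisible() can be summarized in a standalone sketch (invented helper, illustrative only): under a transaction snapshot, an invisible conflicting tuple raises a serialization failure unless our own transaction inserted it.

#include <stdio.h>

static void
check_visible(int uses_xact_snapshot, int visible, int xmin_is_ours)
{
    if (!uses_xact_snapshot)
        printf("ok: READ COMMITTED, no check needed\n");
    else if (visible)
        printf("ok: tuple visible to our snapshot\n");
    else if (xmin_is_ours)
        printf("ok: our own transaction inserted it\n");
    else
        printf("ERROR: could not serialize access due to concurrent update\n");
}

int
main(void)
{
    check_visible(0, 0, 0);
    check_visible(1, 1, 0);
    check_visible(1, 0, 1);
    check_visible(1, 0, 0);
    return 0;
}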
412 :
413 : /*
414 : * Initialize generated columns handling for a tuple
415 : *
416 : * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI or
417 : * ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
418 : * This is used only for stored generated columns.
419 : *
420 : * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
421 : * This is used by both stored and virtual generated columns.
422 : *
423 : * Note: usually, a given query would need only one of ri_GeneratedExprsI and
424 : * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
425 : * cross-partition UPDATEs, since a partition might be the target of both
426 : * UPDATE and INSERT actions.
427 : */
428 : void
429 59166 : ExecInitGenerated(ResultRelInfo *resultRelInfo,
430 : EState *estate,
431 : CmdType cmdtype)
432 : {
433 59166 : Relation rel = resultRelInfo->ri_RelationDesc;
434 59166 : TupleDesc tupdesc = RelationGetDescr(rel);
435 59166 : int natts = tupdesc->natts;
436 : ExprState **ri_GeneratedExprs;
437 : int ri_NumGeneratedNeeded;
438 : Bitmapset *updatedCols;
439 : MemoryContext oldContext;
440 :
441 : /* Nothing to do if no generated columns */
442 59166 : if (!(tupdesc->constr && (tupdesc->constr->has_generated_stored || tupdesc->constr->has_generated_virtual)))
443 58090 : return;
444 :
445 : /*
446 : * In an UPDATE, we can skip computing any generated columns that do not
447 : * depend on any UPDATE target column. But if there is a BEFORE ROW
448 : * UPDATE trigger, we cannot skip because the trigger might change more
449 : * columns.
450 : */
451 1076 : if (cmdtype == CMD_UPDATE &&
452 250 : !(rel->trigdesc && rel->trigdesc->trig_update_before_row))
453 206 : updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
454 : else
455 870 : updatedCols = NULL;
456 :
457 : /*
458 : * Make sure these data structures are built in the per-query memory
459 : * context so they'll survive throughout the query.
460 : */
461 1076 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
462 :
463 1076 : ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
464 1076 : ri_NumGeneratedNeeded = 0;
465 :
466 4140 : for (int i = 0; i < natts; i++)
467 : {
468 3070 : char attgenerated = TupleDescAttr(tupdesc, i)->attgenerated;
469 :
470 3070 : if (attgenerated)
471 : {
472 : Expr *expr;
473 :
474 : /* Fetch the GENERATED AS expression tree */
475 1166 : expr = (Expr *) build_column_default(rel, i + 1);
476 1166 : if (expr == NULL)
477 0 : elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
478 : i + 1, RelationGetRelationName(rel));
479 :
480 : /*
481 : * If it's an update with a known set of update target columns,
482 : * see if we can skip the computation.
483 : */
484 1166 : if (updatedCols)
485 : {
486 220 : Bitmapset *attrs_used = NULL;
487 :
488 220 : pull_varattnos((Node *) expr, 1, &attrs_used);
489 :
490 220 : if (!bms_overlap(updatedCols, attrs_used))
491 24 : continue; /* need not update this column */
492 : }
493 :
494 : /* No luck, so prepare the expression for execution */
495 1142 : if (attgenerated == ATTRIBUTE_GENERATED_STORED)
496 : {
497 1058 : ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
498 1052 : ri_NumGeneratedNeeded++;
499 : }
500 :
501 : /* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
502 1136 : if (cmdtype == CMD_UPDATE)
503 248 : resultRelInfo->ri_extraUpdatedCols =
504 248 : bms_add_member(resultRelInfo->ri_extraUpdatedCols,
505 : i + 1 - FirstLowInvalidHeapAttributeNumber);
506 : }
507 : }
508 :
509 1070 : if (ri_NumGeneratedNeeded == 0)
510 : {
511 : /* didn't need it after all */
512 42 : pfree(ri_GeneratedExprs);
513 42 : ri_GeneratedExprs = NULL;
514 : }
515 :
516 : /* Save in appropriate set of fields */
517 1070 : if (cmdtype == CMD_UPDATE)
518 : {
519 : /* Don't call twice */
520 : Assert(resultRelInfo->ri_GeneratedExprsU == NULL);
521 :
522 250 : resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
523 250 : resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;
524 :
525 250 : resultRelInfo->ri_extraUpdatedCols_valid = true;
526 : }
527 : else
528 : {
529 : /* Don't call twice */
530 : Assert(resultRelInfo->ri_GeneratedExprsI == NULL);
531 :
532 820 : resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
533 820 : resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
534 : }
535 :
536 1070 : MemoryContextSwitchTo(oldContext);
537 : }
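The pull_varattnos()/bms_overlap() test above reduces to a bitset intersection. A minimal standalone sketch with plain unsigned masks standing in for Bitmapsets:

#include <stdio.h>

int
main(void)
{
    unsigned    updated_cols = 1u << 2;     /* the UPDATE sets column 3 */
    unsigned    expr_uses = (1u << 0) | (1u << 1);  /* expr reads columns 1, 2 */

    if (updated_cols & expr_uses)
        printf("recompute the generated column\n");
    else
        printf("skip it: no updated column feeds its expression\n");
    return 0;
}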
538 :
539 : /*
540 : * Compute stored generated columns for a tuple
541 : */
542 : void
543 1458 : ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
544 : EState *estate, TupleTableSlot *slot,
545 : CmdType cmdtype)
546 : {
547 1458 : Relation rel = resultRelInfo->ri_RelationDesc;
548 1458 : TupleDesc tupdesc = RelationGetDescr(rel);
549 1458 : int natts = tupdesc->natts;
550 1458 : ExprContext *econtext = GetPerTupleExprContext(estate);
551 : ExprState **ri_GeneratedExprs;
552 : MemoryContext oldContext;
553 : Datum *values;
554 : bool *nulls;
555 :
556 : /* We should not be called unless this is true */
557 : Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);
558 :
559 : /*
560 : * Initialize the expressions if we didn't already, and check whether we
561 : * can exit early because nothing needs to be computed.
562 : */
563 1458 : if (cmdtype == CMD_UPDATE)
564 : {
565 262 : if (resultRelInfo->ri_GeneratedExprsU == NULL)
566 200 : ExecInitGenerated(resultRelInfo, estate, cmdtype);
567 262 : if (resultRelInfo->ri_NumGeneratedNeededU == 0)
568 18 : return;
569 244 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
570 : }
571 : else
572 : {
573 1196 : if (resultRelInfo->ri_GeneratedExprsI == NULL)
574 826 : ExecInitGenerated(resultRelInfo, estate, cmdtype);
575 : /* Early exit is impossible given the prior Assert */
576 : Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
577 1190 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
578 : }
579 :
580 1434 : oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
581 :
582 1434 : values = palloc(sizeof(*values) * natts);
583 1434 : nulls = palloc(sizeof(*nulls) * natts);
584 :
585 1434 : slot_getallattrs(slot);
586 1434 : memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);
587 :
588 5364 : for (int i = 0; i < natts; i++)
589 : {
590 3954 : CompactAttribute *attr = TupleDescCompactAttr(tupdesc, i);
591 :
592 3954 : if (ri_GeneratedExprs[i])
593 : {
594 : Datum val;
595 : bool isnull;
596 :
597 : Assert(TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED);
598 :
599 1468 : econtext->ecxt_scantuple = slot;
600 :
601 1468 : val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);
602 :
603 : /*
604 : * We must make a copy of val as we have no guarantees about where
605 : * memory for a pass-by-reference Datum is located.
606 : */
607 1444 : if (!isnull)
608 1390 : val = datumCopy(val, attr->attbyval, attr->attlen);
609 :
610 1444 : values[i] = val;
611 1444 : nulls[i] = isnull;
612 : }
613 : else
614 : {
615 2486 : if (!nulls[i])
616 2336 : values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
617 : }
618 : }
619 :
620 1410 : ExecClearTuple(slot);
621 1410 : memcpy(slot->tts_values, values, sizeof(*values) * natts);
622 1410 : memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
623 1410 : ExecStoreVirtualTuple(slot);
624 1410 : ExecMaterializeSlot(slot);
625 :
626 1410 : MemoryContextSwitchTo(oldContext);
627 : }
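The datumCopy() calls above exist because slot_getallattrs() may leave tts_values pointing into slot-owned memory that ExecClearTuple() invalidates. A standalone sketch of the hazard and the fix, with plain malloc standing in for Datum storage:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
    char       *slot_mem = malloc(8);   /* memory owned by the "slot" */
    char       *copy;

    strcpy(slot_mem, "payload");
    copy = malloc(8);           /* datumCopy() analog */
    strcpy(copy, slot_mem);
    free(slot_mem);             /* ExecClearTuple() analog releases slot memory */
    printf("%s\n", copy);       /* still safe: we hold our own copy */
    free(copy);
    return 0;
}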
628 :
629 : /*
630 : * ExecInitInsertProjection
631 : * Do one-time initialization of projection data for INSERT tuples.
632 : *
633 : * INSERT queries may need a projection to filter out junk attrs in the tlist.
634 : *
635 : * This is also a convenient place to verify that the
636 : * output of an INSERT matches the target table.
637 : */
638 : static void
639 91944 : ExecInitInsertProjection(ModifyTableState *mtstate,
640 : ResultRelInfo *resultRelInfo)
641 : {
642 91944 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
643 91944 : Plan *subplan = outerPlan(node);
644 91944 : EState *estate = mtstate->ps.state;
645 91944 : List *insertTargetList = NIL;
646 91944 : bool need_projection = false;
647 : ListCell *l;
648 :
649 : /* Extract non-junk columns of the subplan's result tlist. */
650 282042 : foreach(l, subplan->targetlist)
651 : {
652 190098 : TargetEntry *tle = (TargetEntry *) lfirst(l);
653 :
654 190098 : if (!tle->resjunk)
655 190098 : insertTargetList = lappend(insertTargetList, tle);
656 : else
657 0 : need_projection = true;
658 : }
659 :
660 : /*
661 : * The junk-free list must produce a tuple suitable for the result
662 : * relation.
663 : */
664 91944 : ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);
665 :
666 : /* We'll need a slot matching the table's format. */
667 91944 : resultRelInfo->ri_newTupleSlot =
668 91944 : table_slot_create(resultRelInfo->ri_RelationDesc,
669 : &estate->es_tupleTable);
670 :
671 : /* Build ProjectionInfo if needed (it probably isn't). */
672 91944 : if (need_projection)
673 : {
674 0 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
675 :
676 : /* need an expression context to do the projection */
677 0 : if (mtstate->ps.ps_ExprContext == NULL)
678 0 : ExecAssignExprContext(estate, &mtstate->ps);
679 :
680 0 : resultRelInfo->ri_projectNew =
681 0 : ExecBuildProjectionInfo(insertTargetList,
682 : mtstate->ps.ps_ExprContext,
683 : resultRelInfo->ri_newTupleSlot,
684 : &mtstate->ps,
685 : relDesc);
686 : }
687 :
688 91944 : resultRelInfo->ri_projectNewInfoValid = true;
689 91944 : }
690 :
691 : /*
692 : * ExecInitUpdateProjection
693 : * Do one-time initialization of projection data for UPDATE tuples.
694 : *
695 : * UPDATE always needs a projection, because (1) there are always some junk
696 : * attrs, and (2) we may need to merge values of not-updated columns from
697 : * the old tuple into the final tuple. In UPDATE, the tuple arriving from
698 : * the subplan contains only new values for the changed columns, plus row
699 : * identity info in the junk attrs.
700 : *
701 : * This is "one-time" for any given result rel, but we might touch more than
702 : * one result rel in the course of an inherited UPDATE, and each one needs
703 : * its own projection due to possible column order variation.
704 : *
705 : * This is also a convenient place to verify that the output of an UPDATE
706 : * matches the target table (ExecBuildUpdateProjection does that).
707 : */
708 : static void
709 13162 : ExecInitUpdateProjection(ModifyTableState *mtstate,
710 : ResultRelInfo *resultRelInfo)
711 : {
712 13162 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
713 13162 : Plan *subplan = outerPlan(node);
714 13162 : EState *estate = mtstate->ps.state;
715 13162 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
716 : int whichrel;
717 : List *updateColnos;
718 :
719 : /*
720 : * Usually, mt_lastResultIndex matches the target rel. If it happens not
721 : * to, we can get the index the hard way with an integer division.
722 : */
723 13162 : whichrel = mtstate->mt_lastResultIndex;
724 13162 : if (resultRelInfo != mtstate->resultRelInfo + whichrel)
725 : {
726 0 : whichrel = resultRelInfo - mtstate->resultRelInfo;
727 : Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
728 : }
729 :
730 13162 : updateColnos = (List *) list_nth(mtstate->mt_updateColnosLists, whichrel);
731 :
732 : /*
733 : * For UPDATE, we use the old tuple to fill up missing values in the tuple
734 : * produced by the subplan to get the new tuple. We need two slots, both
735 : * matching the table's desired format.
736 : */
737 13162 : resultRelInfo->ri_oldTupleSlot =
738 13162 : table_slot_create(resultRelInfo->ri_RelationDesc,
739 : &estate->es_tupleTable);
740 13162 : resultRelInfo->ri_newTupleSlot =
741 13162 : table_slot_create(resultRelInfo->ri_RelationDesc,
742 : &estate->es_tupleTable);
743 :
744 : /* need an expression context to do the projection */
745 13162 : if (mtstate->ps.ps_ExprContext == NULL)
746 11778 : ExecAssignExprContext(estate, &mtstate->ps);
747 :
748 13162 : resultRelInfo->ri_projectNew =
749 13162 : ExecBuildUpdateProjection(subplan->targetlist,
750 : false, /* subplan did the evaluation */
751 : updateColnos,
752 : relDesc,
753 : mtstate->ps.ps_ExprContext,
754 : resultRelInfo->ri_newTupleSlot,
755 : &mtstate->ps);
756 :
757 13162 : resultRelInfo->ri_projectNewInfoValid = true;
758 13162 : }
759 :
760 : /*
761 : * ExecGetInsertNewTuple
762 : * This prepares a "new" tuple ready to be inserted into given result
763 : * relation, by removing any junk columns of the plan's output tuple
764 : * and (if necessary) coercing the tuple to the right tuple format.
765 : */
766 : static TupleTableSlot *
767 12161782 : ExecGetInsertNewTuple(ResultRelInfo *relinfo,
768 : TupleTableSlot *planSlot)
769 : {
770 12161782 : ProjectionInfo *newProj = relinfo->ri_projectNew;
771 : ExprContext *econtext;
772 :
773 : /*
774 : * If there's no projection to be done, just make sure the slot is of the
775 : * right type for the target rel. If the planSlot is the right type we
776 : * can use it as-is, else copy the data into ri_newTupleSlot.
777 : */
778 12161782 : if (newProj == NULL)
779 : {
780 12161782 : if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
781 : {
782 11384746 : ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
783 11384746 : return relinfo->ri_newTupleSlot;
784 : }
785 : else
786 777036 : return planSlot;
787 : }
788 :
789 : /*
790 : * Else project; since the projection output slot is ri_newTupleSlot, this
791 : * will also fix any slot-type problem.
792 : *
793 : * Note: currently, this is dead code, because INSERT cases don't receive
794 : * any junk columns so there's never a projection to be done.
795 : */
796 0 : econtext = newProj->pi_exprContext;
797 0 : econtext->ecxt_outertuple = planSlot;
798 0 : return ExecProject(newProj);
799 : }
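A standalone sketch of the slot-compatibility shortcut above, with invented stand-in types: when the plan slot's ops match the table slot's, it is returned as-is; otherwise the data is copied into the table-format slot.

#include <stdio.h>
#include <string.h>

typedef struct FakeSlotOps
{
    const char *name;
} FakeSlotOps;

typedef struct FakeSlot
{
    const FakeSlotOps *ops;     /* slot implementation, compared by pointer */
    char        data[16];
} FakeSlot;

static const FakeSlotOps heap_ops = {"heap"};
static const FakeSlotOps virtual_ops = {"virtual"};

static FakeSlot *
get_insert_new_tuple(FakeSlot *table_slot, FakeSlot *plan_slot)
{
    if (table_slot->ops != plan_slot->ops)
    {
        memcpy(table_slot->data, plan_slot->data, sizeof(plan_slot->data));
        return table_slot;      /* copied into the table's format */
    }
    return plan_slot;           /* same format: use as-is, no copy */
}

int
main(void)
{
    FakeSlot    table_slot = {&heap_ops, ""};
    FakeSlot    plan_slot = {&virtual_ops, "row-1"};

    printf("%s\n", get_insert_new_tuple(&table_slot, &plan_slot)->data);
    return 0;
}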
800 :
801 : /*
802 : * ExecGetUpdateNewTuple
803 : * This prepares a "new" tuple by combining an UPDATE subplan's output
804 : * tuple (which contains values of changed columns) with unchanged
805 : * columns taken from the old tuple.
806 : *
807 : * The subplan tuple might also contain junk columns, which are ignored.
808 : * Note that the projection also ensures we have a slot of the right type.
809 : */
810 : TupleTableSlot *
811 317356 : ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
812 : TupleTableSlot *planSlot,
813 : TupleTableSlot *oldSlot)
814 : {
815 317356 : ProjectionInfo *newProj = relinfo->ri_projectNew;
816 : ExprContext *econtext;
817 :
818 : /* Use a few extra Asserts to protect against outside callers */
819 : Assert(relinfo->ri_projectNewInfoValid);
820 : Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
821 : Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));
822 :
823 317356 : econtext = newProj->pi_exprContext;
824 317356 : econtext->ecxt_outertuple = planSlot;
825 317356 : econtext->ecxt_scantuple = oldSlot;
826 317356 : return ExecProject(newProj);
827 : }
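Conceptually, the projection built by ExecBuildUpdateProjection() merges the subplan's changed-column values over the old tuple. A minimal standalone sketch of that merge, with invented arrays in place of slots:

#include <stdio.h>

#define NATTS 4

int
main(void)
{
    int         oldtup[NATTS] = {10, 20, 30, 40};
    int         newvals[] = {99};   /* subplan emits changed columns only */
    int         colnos[] = {2};     /* ...for target column numbers (1-based) */
    int         newtup[NATTS];

    for (int i = 0; i < NATTS; i++)
        newtup[i] = oldtup[i];      /* start from the old tuple */
    for (int j = 0; j < 1; j++)
        newtup[colnos[j] - 1] = newvals[j]; /* overlay changed columns */

    for (int i = 0; i < NATTS; i++)
        printf("%d ", newtup[i]);
    printf("\n");
    return 0;
}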
828 :
829 : /* ----------------------------------------------------------------
830 : * ExecInsert
831 : *
832 : * For INSERT, we have to insert the tuple into the target relation
833 : * (or partition thereof) and insert appropriate tuples into the index
834 : * relations.
835 : *
836 : * slot contains the new tuple value to be stored.
837 : *
838 : * Returns RETURNING result if any, otherwise NULL.
839 : * *inserted_tuple is the tuple that's effectively inserted;
840 : * *insert_destrel is the relation where it was inserted.
841 : * These are only set on success.
842 : *
843 : * This may change the currently active tuple conversion map in
844 : * mtstate->mt_transition_capture, so the callers must take care to
845 : * save the previous value to avoid losing track of it.
846 : * ----------------------------------------------------------------
847 : */
848 : static TupleTableSlot *
849 12164540 : ExecInsert(ModifyTableContext *context,
850 : ResultRelInfo *resultRelInfo,
851 : TupleTableSlot *slot,
852 : bool canSetTag,
853 : TupleTableSlot **inserted_tuple,
854 : ResultRelInfo **insert_destrel)
855 : {
856 12164540 : ModifyTableState *mtstate = context->mtstate;
857 12164540 : EState *estate = context->estate;
858 : Relation resultRelationDesc;
859 12164540 : List *recheckIndexes = NIL;
860 12164540 : TupleTableSlot *planSlot = context->planSlot;
861 12164540 : TupleTableSlot *result = NULL;
862 : TransitionCaptureState *ar_insert_trig_tcs;
863 12164540 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
864 12164540 : OnConflictAction onconflict = node->onConflictAction;
865 12164540 : PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
866 : MemoryContext oldContext;
867 :
868 : /*
869 : * If the input result relation is a partitioned table, find the leaf
870 : * partition to insert the tuple into.
871 : */
872 12164540 : if (proute)
873 : {
874 : ResultRelInfo *partRelInfo;
875 :
876 728272 : slot = ExecPrepareTupleRouting(mtstate, estate, proute,
877 : resultRelInfo, slot,
878 : &partRelInfo);
879 728068 : resultRelInfo = partRelInfo;
880 : }
881 :
882 12164336 : ExecMaterializeSlot(slot);
883 :
884 12164336 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
885 :
886 : /*
887 : * Open the table's indexes, if we have not done so already, so that we
888 : * can add new index entries for the inserted tuple.
889 : */
890 12164336 : if (resultRelationDesc->rd_rel->relhasindex &&
891 3098790 : resultRelInfo->ri_IndexRelationDescs == NULL)
892 31870 : ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);
893 :
894 : /*
895 : * BEFORE ROW INSERT Triggers.
896 : *
897 : * Note: We fire BEFORE ROW triggers for every attempted insertion in an
898 : * INSERT ... ON CONFLICT statement. We cannot check for constraint
899 : * violations before firing these triggers, because they can change the
900 : * values to insert. Also, they can run arbitrary user-defined code with
901 : * side-effects that we can't cancel by just not inserting the tuple.
902 : */
903 12164336 : if (resultRelInfo->ri_TrigDesc &&
904 75100 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)
905 : {
906 : /* Flush any pending inserts, so rows are visible to the triggers */
907 2054 : if (estate->es_insert_pending_result_relations != NIL)
908 6 : ExecPendingInserts(estate);
909 :
910 2054 : if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
911 200 : return NULL; /* "do nothing" */
912 : }
913 :
914 : /* INSTEAD OF ROW INSERT Triggers */
915 12164038 : if (resultRelInfo->ri_TrigDesc &&
916 74802 : resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
917 : {
918 168 : if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
919 6 : return NULL; /* "do nothing" */
920 : }
921 12163870 : else if (resultRelInfo->ri_FdwRoutine)
922 : {
923 : /*
924 : * GENERATED expressions might reference the tableoid column, so
925 : * (re-)initialize tts_tableOid before evaluating them.
926 : */
927 2014 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
928 :
929 : /*
930 : * Compute stored generated columns
931 : */
932 2014 : if (resultRelationDesc->rd_att->constr &&
933 366 : resultRelationDesc->rd_att->constr->has_generated_stored)
934 8 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
935 : CMD_INSERT);
936 :
937 : /*
938 : * If the FDW supports batching, and batching is requested, accumulate
939 : * rows and insert them in batches. Otherwise use the per-row inserts.
940 : */
941 2014 : if (resultRelInfo->ri_BatchSize > 1)
942 : {
943 288 : bool flushed = false;
944 :
945 : /*
946 : * When we've reached the desired batch size, perform the
947 : * insertion.
948 : */
949 288 : if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
950 : {
951 20 : ExecBatchInsert(mtstate, resultRelInfo,
952 : resultRelInfo->ri_Slots,
953 : resultRelInfo->ri_PlanSlots,
954 : resultRelInfo->ri_NumSlots,
955 : estate, canSetTag);
956 20 : flushed = true;
957 : }
958 :
959 288 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
960 :
961 288 : if (resultRelInfo->ri_Slots == NULL)
962 : {
963 56 : resultRelInfo->ri_Slots = palloc(sizeof(TupleTableSlot *) *
964 28 : resultRelInfo->ri_BatchSize);
965 28 : resultRelInfo->ri_PlanSlots = palloc(sizeof(TupleTableSlot *) *
966 28 : resultRelInfo->ri_BatchSize);
967 : }
968 :
969 : /*
970 : * Initialize the batch slots. We don't know how many slots will
971 : * be needed, so we initialize them as the batch grows, and we
972 : * keep them across batches. To mitigate an inefficiency in how
973 : * the resource owner handles objects with many references (as with
974 : * many slots all referencing the same tuple descriptor) we copy
975 : * the appropriate tuple descriptor for each slot.
976 : */
977 288 : if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
978 : {
979 142 : TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
980 : TupleDesc plan_tdesc =
981 142 : CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
982 :
983 284 : resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
984 142 : MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
985 :
986 284 : resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
987 142 : MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
988 :
989 : /* remember how many batch slots we initialized */
990 142 : resultRelInfo->ri_NumSlotsInitialized++;
991 : }
992 :
993 288 : ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
994 : slot);
995 :
996 288 : ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
997 : planSlot);
998 :
999 : /*
1000 : * If these are the first tuples stored in the buffers, add the
1001 : * target rel and the mtstate to the
1002 : * es_insert_pending_result_relations and
1003 : * es_insert_pending_modifytables lists respectively, except in
1004 : * the case where flushing was done above, in which case they
1005 : * would already have been added to the lists, so no need to do
1006 : * this.
1007 : */
1008 288 : if (resultRelInfo->ri_NumSlots == 0 && !flushed)
1009 : {
1010 : Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
1011 : resultRelInfo));
1012 36 : estate->es_insert_pending_result_relations =
1013 36 : lappend(estate->es_insert_pending_result_relations,
1014 : resultRelInfo);
1015 36 : estate->es_insert_pending_modifytables =
1016 36 : lappend(estate->es_insert_pending_modifytables, mtstate);
1017 : }
1018 : Assert(list_member_ptr(estate->es_insert_pending_result_relations,
1019 : resultRelInfo));
1020 :
1021 288 : resultRelInfo->ri_NumSlots++;
1022 :
1023 288 : MemoryContextSwitchTo(oldContext);
1024 :
1025 288 : return NULL;
1026 : }
1027 :
1028 : /*
1029 : * insert into foreign table: let the FDW do it
1030 : */
1031 1726 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
1032 : resultRelInfo,
1033 : slot,
1034 : planSlot);
1035 :
1036 1720 : if (slot == NULL) /* "do nothing" */
1037 4 : return NULL;
1038 :
1039 : /*
1040 : * AFTER ROW Triggers or RETURNING expressions might reference the
1041 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
1042 : * them. (This covers the case where the FDW replaced the slot.)
1043 : */
1044 1716 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1045 : }
1046 : else
1047 : {
1048 : WCOKind wco_kind;
1049 :
1050 : /*
1051 : * Constraints and GENERATED expressions might reference the tableoid
1052 : * column, so (re-)initialize tts_tableOid before evaluating them.
1053 : */
1054 12161856 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1055 :
1056 : /*
1057 : * Compute stored generated columns
1058 : */
1059 12161856 : if (resultRelationDesc->rd_att->constr &&
1060 3712500 : resultRelationDesc->rd_att->constr->has_generated_stored)
1061 1058 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
1062 : CMD_INSERT);
1063 :
1064 : /*
1065 : * Check any RLS WITH CHECK policies.
1066 : *
1067 : * Normally we should check INSERT policies. But if the insert is the
1068 : * result of a partition key update that moved the tuple to a new
1069 : * partition, we should instead check UPDATE policies, because we are
1070 : * executing policies defined on the target table, and not those
1071 : * defined on the child partitions.
1072 : *
1073 : * If we're running MERGE, we refer to the action that we're executing
1074 : * to know if we're doing an INSERT or UPDATE to a partition table.
1075 : */
1076 12161826 : if (mtstate->operation == CMD_UPDATE)
1077 782 : wco_kind = WCO_RLS_UPDATE_CHECK;
1078 12161044 : else if (mtstate->operation == CMD_MERGE)
1079 1736 : wco_kind = (mtstate->mt_merge_action->mas_action->commandType == CMD_UPDATE) ?
1080 1736 : WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
1081 : else
1082 12159308 : wco_kind = WCO_RLS_INSERT_CHECK;
1083 :
1084 : /*
1085 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
1086 : * we are looking for at this point.
1087 : */
1088 12161826 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1089 582 : ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
1090 :
1091 : /*
1092 : * Check the constraints of the tuple.
1093 : */
1094 12161640 : if (resultRelationDesc->rd_att->constr)
1095 3712374 : ExecConstraints(resultRelInfo, slot, estate);
1096 :
1097 : /*
1098 : * Also check the tuple against the partition constraint, if there is
1099 : * one; except that if we got here via tuple-routing, we can skip the
1100 : * check when no BR trigger is defined on the partition.
1101 : */
1102 12160934 : if (resultRelationDesc->rd_rel->relispartition &&
1103 732296 : (resultRelInfo->ri_RootResultRelInfo == NULL ||
1104 727478 : (resultRelInfo->ri_TrigDesc &&
1105 1526 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
1106 5014 : ExecPartitionCheck(resultRelInfo, slot, estate, true);
1107 :
1108 12160766 : if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
1109 4112 : {
1110 : /* Perform a speculative insertion. */
1111 : uint32 specToken;
1112 : ItemPointerData conflictTid;
1113 : ItemPointerData invalidItemPtr;
1114 : bool specConflict;
1115 : List *arbiterIndexes;
1116 :
1117 9542 : ItemPointerSetInvalid(&invalidItemPtr);
1118 9542 : arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
1119 :
1120 : /*
1121 : * Do a non-conclusive check for conflicts first.
1122 : *
1123 : * We're not holding any locks yet, so this doesn't guarantee that
1124 : * the later insert won't conflict. But it avoids leaving behind
1125 : * a lot of canceled speculative insertions if you run a lot of
1126 : * INSERT ON CONFLICT statements that do conflict.
1127 : *
1128 : * We loop back here if we find a conflict below, either during
1129 : * the pre-check, or when we re-check after inserting the tuple
1130 : * speculatively. Better allow interrupts in case some bug makes
1131 : * this an infinite loop.
1132 : */
1133 9552 : vlock:
1134 9552 : CHECK_FOR_INTERRUPTS();
1135 9552 : specConflict = false;
1136 9552 : if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
1137 : &conflictTid, &invalidItemPtr,
1138 : arbiterIndexes))
1139 : {
1140 : /* committed conflict tuple found */
1141 5418 : if (onconflict == ONCONFLICT_UPDATE)
1142 : {
1143 : /*
1144 : * In case of ON CONFLICT DO UPDATE, execute the UPDATE
1145 : * part. Be prepared to retry if the UPDATE fails because
1146 : * of another concurrent UPDATE/DELETE to the conflict
1147 : * tuple.
1148 : */
1149 5206 : TupleTableSlot *returning = NULL;
1150 :
1151 5206 : if (ExecOnConflictUpdate(context, resultRelInfo,
1152 : &conflictTid, slot, canSetTag,
1153 : &returning))
1154 : {
1155 5128 : InstrCountTuples2(&mtstate->ps, 1);
1156 5128 : return returning;
1157 : }
1158 : else
1159 0 : goto vlock;
1160 : }
1161 : else
1162 : {
1163 : /*
1164 : * In case of ON CONFLICT DO NOTHING, do nothing. However,
1165 : * verify that the tuple is visible to the executor's MVCC
1166 : * snapshot at higher isolation levels.
1167 : *
1168 : * Using ExecGetReturningSlot() to store the tuple for the
1169 : * recheck isn't that pretty, but we can't trivially use
1170 : * the input slot, because it might not be of a compatible
1171 : * type. As there's no conflicting usage of
1172 : * ExecGetReturningSlot() in the DO NOTHING case...
1173 : */
1174 : Assert(onconflict == ONCONFLICT_NOTHING);
1175 212 : ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
1176 : ExecGetReturningSlot(estate, resultRelInfo));
1177 192 : InstrCountTuples2(&mtstate->ps, 1);
1178 192 : return NULL;
1179 : }
1180 : }
1181 :
1182 : /*
1183 : * Before we start insertion proper, acquire our "speculative
1184 : * insertion lock". Others can use that to wait for us to decide
1185 : * if we're going to go ahead with the insertion, instead of
1186 : * waiting for the whole transaction to complete.
1187 : */
1188 4128 : specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
1189 :
1190 : /* insert the tuple, with the speculative token */
1191 4128 : table_tuple_insert_speculative(resultRelationDesc, slot,
1192 : estate->es_output_cid,
1193 : 0,
1194 : NULL,
1195 : specToken);
1196 :
1197 : /* insert index entries for tuple */
1198 4128 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1199 : slot, estate, false, true,
1200 : &specConflict,
1201 : arbiterIndexes,
1202 : false);
1203 :
1204 : /* adjust the tuple's state accordingly */
1205 4122 : table_tuple_complete_speculative(resultRelationDesc, slot,
1206 4122 : specToken, !specConflict);
1207 :
1208 : /*
1209 : * Wake up anyone waiting for our decision. They will re-check
1210 : * the tuple, see that it's no longer speculative, and wait on our
1211 : * XID as if this was a regularly inserted tuple all along. Or if
1212 : * we killed the tuple, they will see it's dead, and proceed as if
1213 : * the tuple never existed.
1214 : */
1215 4122 : SpeculativeInsertionLockRelease(GetCurrentTransactionId());
1216 :
1217 : /*
1218 : * If there was a conflict, start from the beginning. We'll do
1219 : * the pre-check again, which will now find the conflicting tuple
1220 : * (unless it aborts before we get there).
1221 : */
1222 4122 : if (specConflict)
1223 : {
1224 10 : list_free(recheckIndexes);
1225 10 : goto vlock;
1226 : }
1227 :
1228 : /* Since there was no insertion conflict, we're done */
1229 : }
1230 : else
1231 : {
1232 : /* insert the tuple normally */
1233 12151224 : table_tuple_insert(resultRelationDesc, slot,
1234 : estate->es_output_cid,
1235 : 0, NULL);
1236 :
1237 : /* insert index entries for tuple */
1238 12151190 : if (resultRelInfo->ri_NumIndices > 0)
1239 3088680 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1240 : slot, estate, false,
1241 : false, NULL, NIL,
1242 : false);
1243 : }
1244 : }
1245 :
1246 12156596 : if (canSetTag)
1247 12155418 : (estate->es_processed)++;
1248 :
1249 : /*
1250 : * If this insert is the result of a partition key update that moved the
1251 : * tuple to a new partition, put this row into the transition NEW TABLE,
1252 : * if there is one. We need to do this separately for DELETE and INSERT
1253 : * because they happen on different tables.
1254 : */
1255 12156596 : ar_insert_trig_tcs = mtstate->mt_transition_capture;
1256 12156596 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
1257 54 : && mtstate->mt_transition_capture->tcs_update_new_table)
1258 : {
1259 48 : ExecARUpdateTriggers(estate, resultRelInfo,
1260 : NULL, NULL,
1261 : NULL,
1262 : NULL,
1263 : slot,
1264 : NULL,
1265 48 : mtstate->mt_transition_capture,
1266 : false);
1267 :
1268 : /*
1269 : * We've already captured the NEW TABLE row, so make sure any AR
1270 : * INSERT trigger fired below doesn't capture it again.
1271 : */
1272 48 : ar_insert_trig_tcs = NULL;
1273 : }
1274 :
1275 : /* AFTER ROW INSERT Triggers */
1276 12156596 : ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
1277 : ar_insert_trig_tcs);
1278 :
1279 12156596 : list_free(recheckIndexes);
1280 :
1281 : /*
1282 : * Check any WITH CHECK OPTION constraints from parent views. We are
1283 : * required to do this after testing all constraints and uniqueness
1284 : * violations per the SQL spec, so we do it after actually inserting the
1285 : * record into the heap and all indexes.
1286 : *
1287 : * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
1288 : * tuple will never be seen, if it violates the WITH CHECK OPTION.
1289 : *
1290 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1291 : * are looking for at this point.
1292 : */
1293 12156596 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1294 382 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1295 :
1296 : /* Process RETURNING if present */
1297 12156450 : if (resultRelInfo->ri_projectReturning)
1298 : {
1299 3580 : TupleTableSlot *oldSlot = NULL;
1300 :
1301 : /*
1302 : * If this is part of a cross-partition UPDATE, and the RETURNING list
1303 : * refers to any OLD columns, ExecDelete() will have saved the tuple
1304 : * deleted from the original partition, which we must use here to
1305 : * compute the OLD column values. Otherwise, all OLD column values
1306 : * will be NULL.
1307 : */
1308 3580 : if (context->cpDeletedSlot)
1309 : {
1310 : TupleConversionMap *tupconv_map;
1311 :
1312 : /*
1313 : * Convert the OLD tuple to the new partition's format/slot, if
1314 : * needed. Note that ExecDelete() already converted it to the
1315 : * root's partition's format/slot.
1316 : */
1317 44 : oldSlot = context->cpDeletedSlot;
1318 44 : tupconv_map = ExecGetRootToChildMap(resultRelInfo, estate);
1319 44 : if (tupconv_map != NULL)
1320 : {
1321 14 : oldSlot = execute_attr_map_slot(tupconv_map->attrMap,
1322 : oldSlot,
1323 : ExecGetReturningSlot(estate,
1324 : resultRelInfo));
1325 :
1326 14 : oldSlot->tts_tableOid = context->cpDeletedSlot->tts_tableOid;
1327 14 : ItemPointerCopy(&context->cpDeletedSlot->tts_tid, &oldSlot->tts_tid);
1328 : }
1329 : }
1330 :
1331 3580 : result = ExecProcessReturning(context, resultRelInfo, CMD_INSERT,
1332 : oldSlot, slot, planSlot);
1333 :
1334 : /*
1335 : * For a cross-partition UPDATE, release the old tuple, first making
1336 : * sure that the result slot has a local copy of any pass-by-reference
1337 : * values.
1338 : */
1339 3568 : if (context->cpDeletedSlot)
1340 : {
1341 44 : ExecMaterializeSlot(result);
1342 44 : ExecClearTuple(oldSlot);
1343 44 : if (context->cpDeletedSlot != oldSlot)
1344 14 : ExecClearTuple(context->cpDeletedSlot);
1345 44 : context->cpDeletedSlot = NULL;
1346 : }
1347 : }
1348 :
1349 12156438 : if (inserted_tuple)
1350 808 : *inserted_tuple = slot;
1351 12156438 : if (insert_destrel)
1352 808 : *insert_destrel = resultRelInfo;
1353 :
1354 12156438 : return result;
1355 : }
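The ON CONFLICT control flow in ExecInsert() (precheck, speculative insert, index recheck, retry via the vlock label) can be sketched as a standalone loop. All helpers are invented; the simulation pretends the conflicting inserter aborted before the retry, so the second precheck still finds nothing.

#include <stdio.h>

static int  conflicts_left = 1; /* simulate one transient conflict */

static int
find_conflict(void)
{
    return 0;                   /* precheck finds no committed conflict */
}

static int
index_insert_conflict(void)
{
    if (conflicts_left > 0)
    {
        conflicts_left--;
        return 1;               /* another backend won the race once */
    }
    return 0;
}

int
main(void)
{
    for (;;)                    /* the "vlock" retry point */
    {
        if (find_conflict())
        {
            printf("conflict found: take the DO UPDATE / DO NOTHING path\n");
            break;
        }
        printf("speculative insert\n");
        if (index_insert_conflict())
        {
            printf("lost the race, killing tuple and retrying\n");
            continue;           /* the real code re-runs the precheck here */
        }
        printf("insert confirmed\n");
        break;
    }
    return 0;
}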
1356 :
1357 : /* ----------------------------------------------------------------
1358 : * ExecBatchInsert
1359 : *
1360 : * Insert multiple tuples in an efficient way.
1361 : * Currently, this handles inserting into a foreign table without a
1362 : * RETURNING clause.
1363 : * ----------------------------------------------------------------
1364 : */
1365 : static void
1366 56 : ExecBatchInsert(ModifyTableState *mtstate,
1367 : ResultRelInfo *resultRelInfo,
1368 : TupleTableSlot **slots,
1369 : TupleTableSlot **planSlots,
1370 : int numSlots,
1371 : EState *estate,
1372 : bool canSetTag)
1373 : {
1374 : int i;
1375 56 : int numInserted = numSlots;
1376 56 : TupleTableSlot *slot = NULL;
1377 : TupleTableSlot **rslots;
1378 :
1379 : /*
1380 : * insert into foreign table: let the FDW do it
1381 : */
1382 56 : rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1383 : resultRelInfo,
1384 : slots,
1385 : planSlots,
1386 : &numInserted);
1387 :
1388 344 : for (i = 0; i < numInserted; i++)
1389 : {
1390 288 : slot = rslots[i];
1391 :
1392 : /*
1393 : * AFTER ROW Triggers might reference the tableoid column, so
1394 : * (re-)initialize tts_tableOid before evaluating them.
1395 : */
1396 288 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1397 :
1398 : /* AFTER ROW INSERT Triggers */
1399 288 : ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1400 288 : mtstate->mt_transition_capture);
1401 :
1402 : /*
1403 : * Check any WITH CHECK OPTION constraints from parent views. See the
1404 : * comment in ExecInsert.
1405 : */
1406 288 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1407 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1408 : }
1409 :
1410 56 : if (canSetTag && numInserted > 0)
1411 56 : estate->es_processed += numInserted;
1412 :
1413 : /* Clean up all the slots, ready for the next batch */
1414 344 : for (i = 0; i < numSlots; i++)
1415 : {
1416 288 : ExecClearTuple(slots[i]);
1417 288 : ExecClearTuple(planSlots[i]);
1418 : }
1419 56 : resultRelInfo->ri_NumSlots = 0;
1420 56 : }
1421 :
1422 : /*
1423 : * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1424 : */
1425 : static void
1426 34 : ExecPendingInserts(EState *estate)
1427 : {
1428 : ListCell *l1,
1429 : *l2;
1430 :
1431 70 : forboth(l1, estate->es_insert_pending_result_relations,
1432 : l2, estate->es_insert_pending_modifytables)
1433 : {
1434 36 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1435 36 : ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1436 :
1437 : Assert(mtstate);
1438 36 : ExecBatchInsert(mtstate, resultRelInfo,
1439 : resultRelInfo->ri_Slots,
1440 : resultRelInfo->ri_PlanSlots,
1441 : resultRelInfo->ri_NumSlots,
1442 36 : estate, mtstate->canSetTag);
1443 : }
1444 :
1445 34 : list_free(estate->es_insert_pending_result_relations);
1446 34 : list_free(estate->es_insert_pending_modifytables);
1447 34 : estate->es_insert_pending_result_relations = NIL;
1448 34 : estate->es_insert_pending_modifytables = NIL;
1449 34 : }
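Taken together, ExecInsert()'s buffering, ExecBatchInsert(), and ExecPendingInserts() implement a classic accumulate-and-flush pattern. A standalone sketch, assuming a fixed batch size and an invented flush() in place of ExecForeignBatchInsert():

#include <stdio.h>

#define BATCH_SIZE 3

static int  buf[BATCH_SIZE];
static int  nbuf = 0;

static void
flush(void)
{
    /* one ExecForeignBatchInsert()-style call for the whole buffer */
    printf("flush %d row(s)\n", nbuf);
    nbuf = 0;
}

static void
insert_row(int val)
{
    if (nbuf == BATCH_SIZE)
        flush();                /* buffer full: flush before adding more */
    buf[nbuf++] = val;
}

int
main(void)
{
    for (int i = 0; i < 7; i++)
        insert_row(i);
    if (nbuf > 0)
        flush();                /* ExecPendingInserts() analog at query end */
    return 0;
}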
1450 :
1451 : /*
1452 : * ExecDeletePrologue -- subroutine for ExecDelete
1453 : *
1454 : * Prepare executor state for DELETE. Actually, the only thing we have to do
1455 : * here is execute BEFORE ROW triggers. We return false if one of them makes
1456 : * the delete a no-op; otherwise, return true.
1457 : */
1458 : static bool
1459 1646030 : ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1460 : ItemPointer tupleid, HeapTuple oldtuple,
1461 : TupleTableSlot **epqreturnslot, TM_Result *result)
1462 : {
1463 1646030 : if (result)
1464 1556 : *result = TM_Ok;
1465 :
1466 : /* BEFORE ROW DELETE triggers */
1467 1646030 : if (resultRelInfo->ri_TrigDesc &&
1468 6984 : resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1469 : {
1470 : /* Flush any pending inserts, so rows are visible to the triggers */
1471 346 : if (context->estate->es_insert_pending_result_relations != NIL)
1472 2 : ExecPendingInserts(context->estate);
1473 :
1474 346 : return ExecBRDeleteTriggers(context->estate, context->epqstate,
1475 : resultRelInfo, tupleid, oldtuple,
1476 : epqreturnslot, result, &context->tmfd);
1477 : }
1478 :
1479 1645684 : return true;
1480 : }
1481 :
1482 : /*
1483 : * ExecDeleteAct -- subroutine for ExecDelete
1484 : *
1485 : * Actually delete the tuple from a plain table.
1486 : *
1487 : * Caller is in charge of doing EvalPlanQual as necessary
1488 : */
1489 : static TM_Result
1490 1645852 : ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1491 : ItemPointer tupleid, bool changingPart)
1492 : {
1493 1645852 : EState *estate = context->estate;
1494 :
1495 1645852 : return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1496 : estate->es_output_cid,
1497 : estate->es_snapshot,
1498 : estate->es_crosscheck_snapshot,
1499 : true /* wait for commit */ ,
1500 : &context->tmfd,
1501 : changingPart);
1502 : }
1503 :
1504 : /*
1505 : * ExecDeleteEpilogue -- subroutine for ExecDelete
1506 : *
1507 : * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1508 : * including the UPDATE triggers if the deletion is being done as part of a
1509 : * cross-partition tuple move.
1510 : */
1511 : static void
1512 1645800 : ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1513 : ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1514 : {
1515 1645800 : ModifyTableState *mtstate = context->mtstate;
1516 1645800 : EState *estate = context->estate;
1517 : TransitionCaptureState *ar_delete_trig_tcs;
1518 :
1519 : /*
1520 : * If this delete is the result of a partition key update that moved the
1521 : * tuple to a new partition, put this row into the transition OLD TABLE,
1522 : * if there is one. We need to do this separately for DELETE and INSERT
1523 : * because they happen on different tables.
1524 : */
1525 1645800 : ar_delete_trig_tcs = mtstate->mt_transition_capture;
1526 1645800 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1527 54 : mtstate->mt_transition_capture->tcs_update_old_table)
1528 : {
1529 48 : ExecARUpdateTriggers(estate, resultRelInfo,
1530 : NULL, NULL,
1531 : tupleid, oldtuple,
1532 48 : NULL, NULL, mtstate->mt_transition_capture,
1533 : false);
1534 :
1535 : /*
1536 : * We've already captured the OLD TABLE row, so make sure any AR
1537 : * DELETE trigger fired below doesn't capture it again.
1538 : */
1539 48 : ar_delete_trig_tcs = NULL;
1540 : }
1541 :
1542 : /* AFTER ROW DELETE Triggers */
1543 1645800 : ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1544 : ar_delete_trig_tcs, changingPart);
1545 1645800 : }
1546 :
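/*
 * Illustrative aside (not executor code): ExecDelete() below composes the
 * three subroutines above -- prologue (BEFORE ROW triggers), act (table AM
 * delete), epilogue (AFTER ROW triggers) -- so that MERGE and
 * cross-partition UPDATE can reuse the pieces with their own control flow
 * in between. A toy analogue of that shape, with hypothetical names:
 *
 *     #include <stdbool.h>
 *     #include <stdio.h>
 *
 *     typedef enum { TOY_OK, TOY_FAILED } toy_result;
 *
 *     static bool toy_prologue(int id)
 *     {
 *         printf("BEFORE ROW triggers for %d\n", id);
 *         return true;            // a trigger may turn this into a no-op
 *     }
 *
 *     static toy_result toy_act(int id)
 *     {
 *         printf("delete tuple %d\n", id);
 *         return TOY_OK;          // caller handles concurrency outcomes
 *     }
 *
 *     static void toy_epilogue(int id)
 *     {
 *         printf("AFTER ROW triggers for %d\n", id);
 *     }
 *
 *     static bool toy_delete(int id)
 *     {
 *         if (!toy_prologue(id))
 *             return false;
 *         if (toy_act(id) != TOY_OK)
 *             return false;
 *         toy_epilogue(id);
 *         return true;
 *     }
 *
 *     int main(void) { return toy_delete(42) ? 0 : 1; }
 */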
1547 : /* ----------------------------------------------------------------
1548 : * ExecDelete
1549 : *
1550 : * DELETE is like UPDATE, except that we delete the tuple and no
1551 : * index modifications are needed.
1552 : *
1553 : * When deleting from a table, tupleid identifies the tuple to delete and
1554 : * oldtuple is NULL. When deleting through a view INSTEAD OF trigger,
1555 : * oldtuple is passed to the triggers and identifies what to delete, and
1556 : * tupleid is invalid. When deleting from a foreign table, tupleid is
1557 : * invalid; the FDW has to figure out which row to delete using data from
1558 : * the planSlot. oldtuple is passed to foreign table triggers; it is
1559 : * NULL when the foreign table has no relevant triggers. We use
1560 : * tupleDeleted to indicate whether the tuple was actually deleted, so
1561 : * that callers can decide whether to continue the operation. When this
1562 : * DELETE is part of an UPDATE of the partition key, the slot returned
1563 : * by EvalPlanQual() is passed back using the output parameter
1564 : * epqreturnslot.
1565 : *
1566 : * Returns RETURNING result if any, otherwise NULL.
1567 : * ----------------------------------------------------------------
1568 : */
1569 : static TupleTableSlot *
1570 1645516 : ExecDelete(ModifyTableContext *context,
1571 : ResultRelInfo *resultRelInfo,
1572 : ItemPointer tupleid,
1573 : HeapTuple oldtuple,
1574 : bool processReturning,
1575 : bool changingPart,
1576 : bool canSetTag,
1577 : TM_Result *tmresult,
1578 : bool *tupleDeleted,
1579 : TupleTableSlot **epqreturnslot)
1580 : {
1581 1645516 : EState *estate = context->estate;
1582 1645516 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1583 1645516 : TupleTableSlot *slot = NULL;
1584 : TM_Result result;
1585 : bool saveOld;
1586 :
1587 1645516 : if (tupleDeleted)
1588 1042 : *tupleDeleted = false;
1589 :
1590 : /*
1591 : * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1592 : * done if it says we are.
1593 : */
1594 1645516 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1595 : epqreturnslot, tmresult))
1596 52 : return NULL;
1597 :
1598 : /* INSTEAD OF ROW DELETE Triggers */
1599 1645448 : if (resultRelInfo->ri_TrigDesc &&
1600 6860 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
1601 48 : {
1602 : bool dodelete;
1603 :
1604 : Assert(oldtuple != NULL);
1605 54 : dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1606 :
1607 54 : if (!dodelete) /* "do nothing" */
1608 6 : return NULL;
1609 : }
1610 1645394 : else if (resultRelInfo->ri_FdwRoutine)
1611 : {
1612 : /*
1613 : * delete from foreign table: let the FDW do it
1614 : *
1615 : * We offer the returning slot as a place to store RETURNING data,
1616 : * although the FDW can return some other slot if it wants.
1617 : */
1618 42 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1619 42 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1620 : resultRelInfo,
1621 : slot,
1622 : context->planSlot);
1623 :
1624 42 : if (slot == NULL) /* "do nothing" */
1625 0 : return NULL;
1626 :
1627 : /*
1628 : * RETURNING expressions might reference the tableoid column, so
1629 : * (re)initialize tts_tableOid before evaluating them.
1630 : */
1631 42 : if (TTS_EMPTY(slot))
1632 6 : ExecStoreAllNullTuple(slot);
1633 :
1634 42 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1635 : }
1636 : else
1637 : {
1638 : /*
1639 : * delete the tuple
1640 : *
1641 : * Note: if context->estate->es_crosscheck_snapshot isn't
1642 : * InvalidSnapshot, we check that the row to be deleted is visible to
1643 : * that snapshot, and throw a can't-serialize error if not. This is a
1644 : * special-case behavior needed for referential integrity updates in
1645 : * transaction-snapshot mode transactions.
1646 : */
1647 1645352 : ldelete:
1648 1645356 : result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1649 :
1650 1645320 : if (tmresult)
1651 1008 : *tmresult = result;
1652 :
1653 1645320 : switch (result)
1654 : {
1655 30 : case TM_SelfModified:
1656 :
1657 : /*
1658 : * The target tuple was already updated or deleted by the
1659 : * current command, or by a later command in the current
1660 : * transaction. The former case is possible in a join DELETE
1661 : * where multiple tuples join to the same target tuple. This
1662 : * is somewhat questionable, but Postgres has always allowed
1663 : * it: we just ignore additional deletion attempts.
1664 : *
1665 : * The latter case arises if the tuple is modified by a
1666 : * command in a BEFORE trigger, or perhaps by a command in a
1667 : * volatile function used in the query. In such situations we
1668 : * should not ignore the deletion, but it is equally unsafe to
1669 : * proceed. We don't want to discard the original DELETE
1670 : * while keeping the triggered actions based on its deletion;
1671 : * and it would be no better to allow the original DELETE
1672 : * while discarding updates that it triggered. The row update
1673 : * carries some information that might be important according
1674 : * to business rules; so throwing an error is the only safe
1675 : * course.
1676 : *
1677 : * If a trigger actually intends this type of interaction, it
1678 : * can re-execute the DELETE and then return NULL to cancel
1679 : * the outer delete.
1680 : */
1681 30 : if (context->tmfd.cmax != estate->es_output_cid)
1682 6 : ereport(ERROR,
1683 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1684 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1685 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1686 :
1687 : /* Else, already deleted by self; nothing to do */
1688 24 : return NULL;
1689 :
1690 1645226 : case TM_Ok:
1691 1645226 : break;
1692 :
1693 58 : case TM_Updated:
1694 : {
1695 : TupleTableSlot *inputslot;
1696 : TupleTableSlot *epqslot;
1697 :
1698 58 : if (IsolationUsesXactSnapshot())
1699 2 : ereport(ERROR,
1700 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1701 : errmsg("could not serialize access due to concurrent update")));
1702 :
1703 : /*
1704 : * Already know that we're going to need to do EPQ, so
1705 : * fetch tuple directly into the right slot.
1706 : */
1707 56 : EvalPlanQualBegin(context->epqstate);
1708 56 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1709 : resultRelInfo->ri_RangeTableIndex);
1710 :
1711 56 : result = table_tuple_lock(resultRelationDesc, tupleid,
1712 : estate->es_snapshot,
1713 : inputslot, estate->es_output_cid,
1714 : LockTupleExclusive, LockWaitBlock,
1715 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1716 : &context->tmfd);
1717 :
1718 52 : switch (result)
1719 : {
1720 46 : case TM_Ok:
1721 : Assert(context->tmfd.traversed);
1722 46 : epqslot = EvalPlanQual(context->epqstate,
1723 : resultRelationDesc,
1724 : resultRelInfo->ri_RangeTableIndex,
1725 : inputslot);
1726 46 : if (TupIsNull(epqslot))
1727 : /* Tuple not passing quals anymore, exiting... */
1728 30 : return NULL;
1729 :
1730 : /*
1731 : * If requested, skip delete and pass back the
1732 : * updated row.
1733 : */
1734 16 : if (epqreturnslot)
1735 : {
1736 12 : *epqreturnslot = epqslot;
1737 12 : return NULL;
1738 : }
1739 : else
1740 4 : goto ldelete;
1741 :
1742 4 : case TM_SelfModified:
1743 :
1744 : /*
1745 : * This can be reached when following an update
1746 : * chain from a tuple updated by another session,
1747 : * reaching a tuple that was already updated in
1748 : * this transaction. If previously updated by this
1749 : * command, ignore the delete, otherwise error
1750 : * out.
1751 : *
1752 : * See also TM_SelfModified response to
1753 : * table_tuple_delete() above.
1754 : */
1755 4 : if (context->tmfd.cmax != estate->es_output_cid)
1756 2 : ereport(ERROR,
1757 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1758 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1759 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1760 2 : return NULL;
1761 :
1762 2 : case TM_Deleted:
1763 : /* tuple already deleted; nothing to do */
1764 2 : return NULL;
1765 :
1766 0 : default:
1767 :
1768 : /*
1769 : * TM_Invisible should be impossible because we're
1770 : * waiting for updated row versions, and would
1771 : * already have errored out if the first version
1772 : * is invisible.
1773 : *
1774 : * TM_Updated should be impossible, because we're
1775 : * locking the latest version via
1776 : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
1777 : */
1778 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
1779 : result);
1780 : return NULL;
1781 : }
1782 :
1783 : Assert(false);
1784 : break;
1785 : }
1786 :
1787 6 : case TM_Deleted:
1788 6 : if (IsolationUsesXactSnapshot())
1789 0 : ereport(ERROR,
1790 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1791 : errmsg("could not serialize access due to concurrent delete")));
1792 : /* tuple already deleted; nothing to do */
1793 6 : return NULL;
1794 :
1795 0 : default:
1796 0 : elog(ERROR, "unrecognized table_tuple_delete status: %u",
1797 : result);
1798 : return NULL;
1799 : }
1800 :
1801 : /*
1802 : * Note: Normally one would think that we have to delete index tuples
1803 : * associated with the heap tuple now...
1804 : *
1805 : * ... but in POSTGRES, we have no need to do this because VACUUM will
1806 : * take care of it later. We can't delete index tuples immediately
1807 : * anyway, since the tuple is still visible to other transactions.
1808 : */
1809 : }
1810 :
1811 1645316 : if (canSetTag)
1812 1644124 : (estate->es_processed)++;
1813 :
1814 : /* Tell caller that the delete actually happened. */
1815 1645316 : if (tupleDeleted)
1816 964 : *tupleDeleted = true;
1817 :
1818 1645316 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
1819 :
1820 : /*
1821 : * Process RETURNING if present and if requested.
1822 : *
1823 : * If this is part of a cross-partition UPDATE, and the RETURNING list
1824 : * refers to any OLD column values, save the old tuple here for later
1825 : * processing of the RETURNING list by ExecInsert().
1826 : */
1827 1645462 : saveOld = changingPart && resultRelInfo->ri_projectReturning &&
1828 146 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD;
1829 :
1830 1645316 : if (resultRelInfo->ri_projectReturning && (processReturning || saveOld))
1831 : {
1832 : /*
1833 : * We have to put the target tuple into a slot, which means we must
1834 : * fetch it first. We can use the trigger tuple slot.
1835 : */
1836 : TupleTableSlot *rslot;
1837 :
1838 984 : if (resultRelInfo->ri_FdwRoutine)
1839 : {
1840 : /* FDW must have provided a slot containing the deleted row */
1841 : Assert(!TupIsNull(slot));
1842 : }
1843 : else
1844 : {
1845 970 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1846 970 : if (oldtuple != NULL)
1847 : {
1848 24 : ExecForceStoreHeapTuple(oldtuple, slot, false);
1849 : }
1850 : else
1851 : {
1852 946 : if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
1853 : SnapshotAny, slot))
1854 0 : elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1855 : }
1856 : }
1857 :
1858 : /*
1859 : * If required, save the old tuple for later processing of the
1860 : * RETURNING list by ExecInsert().
1861 : */
1862 984 : if (saveOld)
1863 : {
1864 : TupleConversionMap *tupconv_map;
1865 :
1866 : /*
1867 : * Convert the tuple into the root partition's format/slot, if
1868 : * needed. ExecInsert() will then convert it to the new
1869 : * partition's format/slot, if necessary.
1870 : */
1871 44 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1872 44 : if (tupconv_map != NULL)
1873 : {
1874 18 : ResultRelInfo *rootRelInfo = context->mtstate->rootResultRelInfo;
1875 18 : TupleTableSlot *oldSlot = slot;
1876 :
1877 18 : slot = execute_attr_map_slot(tupconv_map->attrMap,
1878 : slot,
1879 : ExecGetReturningSlot(estate,
1880 : rootRelInfo));
1881 :
1882 18 : slot->tts_tableOid = oldSlot->tts_tableOid;
1883 18 : ItemPointerCopy(&oldSlot->tts_tid, &slot->tts_tid);
1884 : }
1885 :
1886 44 : context->cpDeletedSlot = slot;
1887 :
1888 44 : return NULL;
1889 : }
1890 :
1891 940 : rslot = ExecProcessReturning(context, resultRelInfo, CMD_DELETE,
1892 : slot, NULL, context->planSlot);
1893 :
1894 : /*
1895 : * Before releasing the target tuple again, make sure rslot has a
1896 : * local copy of any pass-by-reference values.
1897 : */
1898 940 : ExecMaterializeSlot(rslot);
1899 :
1900 940 : ExecClearTuple(slot);
1901 :
1902 940 : return rslot;
1903 : }
1904 :
1905 1644332 : return NULL;
1906 : }
1907 :
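/*
 * Illustrative aside (not executor code): the "goto ldelete" retry above
 * re-attempts the delete after EvalPlanQual() produces a surviving tuple
 * version. Reduced to a self-contained control-flow skeleton with toy
 * names:
 *
 *     #include <stdio.h>
 *
 *     typedef enum { TOY_TM_OK, TOY_TM_UPDATED, TOY_TM_DELETED } toy_tm;
 *
 *     // Pretend the first attempt races with a concurrent update.
 *     static toy_tm try_delete(int *attempts)
 *     {
 *         return (*attempts)++ == 0 ? TOY_TM_UPDATED : TOY_TM_OK;
 *     }
 *
 *     // Pretend the re-fetched row version still passes the quals.
 *     static int requalify(void) { return 1; }
 *
 *     int main(void)
 *     {
 *         int attempts = 0;
 *
 *     ldelete:
 *         switch (try_delete(&attempts))
 *         {
 *             case TOY_TM_OK:
 *                 printf("deleted after %d attempt(s)\n", attempts);
 *                 break;
 *             case TOY_TM_UPDATED:
 *                 if (requalify())
 *                     goto ldelete;   // latest version qualifies: retry
 *                 break;              // otherwise the row escaped DELETE
 *             case TOY_TM_DELETED:
 *                 break;              // already gone; nothing to do
 *         }
 *         return 0;
 *     }
 */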
1908 : /*
1909 : * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
1910 : *
1911 : * This works by first deleting the old tuple from the current partition,
1912 : * followed by inserting the new tuple into the root parent table, that is,
1913 : * mtstate->rootResultRelInfo. It will be re-routed from there to the
1914 : * correct partition.
1915 : *
1916 : * Returns true if the tuple has been successfully moved, or if it's found
1917 : * that the tuple was concurrently deleted so there's nothing more to do
1918 : * for the caller.
1919 : *
1920 : * False is returned if the tuple we're trying to move is found to have been
1921 : * concurrently updated. In that case, the caller must check if the updated
1922 : * tuple that's returned in *retry_slot still needs to be re-routed, and call
1923 : * this function again or perform a regular update accordingly. For MERGE,
1924 : * the updated tuple is not returned in *retry_slot; it has its own retry
1925 : * logic.
1926 : */
1927 : static bool
1928 1090 : ExecCrossPartitionUpdate(ModifyTableContext *context,
1929 : ResultRelInfo *resultRelInfo,
1930 : ItemPointer tupleid, HeapTuple oldtuple,
1931 : TupleTableSlot *slot,
1932 : bool canSetTag,
1933 : UpdateContext *updateCxt,
1934 : TM_Result *tmresult,
1935 : TupleTableSlot **retry_slot,
1936 : TupleTableSlot **inserted_tuple,
1937 : ResultRelInfo **insert_destrel)
1938 : {
1939 1090 : ModifyTableState *mtstate = context->mtstate;
1940 1090 : EState *estate = mtstate->ps.state;
1941 : TupleConversionMap *tupconv_map;
1942 : bool tuple_deleted;
1943 1090 : TupleTableSlot *epqslot = NULL;
1944 :
1945 1090 : context->cpDeletedSlot = NULL;
1946 1090 : context->cpUpdateReturningSlot = NULL;
1947 1090 : *retry_slot = NULL;
1948 :
1949 : /*
1950 : * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
1951 : * to migrate to a different partition. Maybe this can be implemented
1952 : * some day, but it seems a fringe feature with little redeeming value.
1953 : */
1954 1090 : if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
1955 0 : ereport(ERROR,
1956 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1957 : errmsg("invalid ON UPDATE specification"),
1958 : errdetail("The result tuple would appear in a different partition than the original tuple.")));
1959 :
1960 : /*
1961 : * When an UPDATE is run directly on a leaf partition, simply fail with a
1962 : * partition constraint violation error.
1963 : */
1964 1090 : if (resultRelInfo == mtstate->rootResultRelInfo)
1965 48 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1966 :
1967 : /* Initialize tuple routing info if not already done. */
1968 1042 : if (mtstate->mt_partition_tuple_routing == NULL)
1969 : {
1970 662 : Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
1971 : MemoryContext oldcxt;
1972 :
1973 : /* Things built here have to last for the query duration. */
1974 662 : oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1975 :
1976 662 : mtstate->mt_partition_tuple_routing =
1977 662 : ExecSetupPartitionTupleRouting(estate, rootRel);
1978 :
1979 : /*
1980 : * Before a partition's tuple can be re-routed, it must first be
1981 : * converted to the root's format, so we'll need a slot for storing
1982 : * such tuples.
1983 : */
1984 : Assert(mtstate->mt_root_tuple_slot == NULL);
1985 662 : mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
1986 :
1987 662 : MemoryContextSwitchTo(oldcxt);
1988 : }
1989 :
1990 : /*
1991 : * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
1992 : * We want to return rows from INSERT.
1993 : */
1994 1042 : ExecDelete(context, resultRelInfo,
1995 : tupleid, oldtuple,
1996 : false, /* processReturning */
1997 : true, /* changingPart */
1998 : false, /* canSetTag */
1999 : tmresult, &tuple_deleted, &epqslot);
2000 :
2001 : /*
2002 : * If for some reason the DELETE didn't happen (e.g., a trigger prevented
2003 : * it, or it was already deleted by self, or it was concurrently deleted
2004 : * by another transaction), then we should skip the insert as well;
2005 : * otherwise, an UPDATE could cause an increase in the total number of
2006 : * rows across all partitions, which is clearly wrong.
2007 : *
2008 : * For a normal UPDATE, the case where the tuple has been the subject of a
2009 : * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
2010 : * machinery, but for an UPDATE that we've translated into a DELETE from
2011 : * this partition and an INSERT into some other partition, that's not
2012 : * available, because CTID chains can't span relation boundaries. We
2013 : * mimic the semantics to a limited extent by skipping the INSERT if the
2014 : * DELETE fails to find a tuple. This ensures that two concurrent
2015 : * attempts to UPDATE the same tuple at the same time can't turn one tuple
2016 : * into two, and that an UPDATE of a just-deleted tuple can't resurrect
2017 : * it.
2018 : */
2019 1040 : if (!tuple_deleted)
2020 : {
2021 : /*
2022 : * epqslot will typically be NULL. But when ExecDelete() finds that
2023 : * another transaction has concurrently updated the same row, it
2024 : * re-fetches the row, skips the delete, and epqslot is set to the
2025 : * re-fetched tuple slot. In that case, we need to do all the checks
2026 : * again. For MERGE, we leave everything to the caller (it must do
2027 : * additional rechecking, and might end up executing a different
2028 : * action entirely).
2029 : */
2030 76 : if (mtstate->operation == CMD_MERGE)
2031 34 : return *tmresult == TM_Ok;
2032 42 : else if (TupIsNull(epqslot))
2033 36 : return true;
2034 : else
2035 : {
2036 : /* Fetch the most recent version of old tuple. */
2037 : TupleTableSlot *oldSlot;
2038 :
2039 : /* ... but first, make sure ri_oldTupleSlot is initialized. */
2040 6 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2041 0 : ExecInitUpdateProjection(mtstate, resultRelInfo);
2042 6 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2043 6 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
2044 : tupleid,
2045 : SnapshotAny,
2046 : oldSlot))
2047 0 : elog(ERROR, "failed to fetch tuple being updated");
2048 : /* and project the new tuple to retry the UPDATE with */
2049 6 : *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
2050 : oldSlot);
2051 6 : return false;
2052 : }
2053 : }
2054 :
2055 : /*
2056 : * resultRelInfo is one of the per-relation resultRelInfos. So we should
2057 : * convert the tuple into root's tuple descriptor if needed, since
2058 : * ExecInsert() starts the search from root.
2059 : */
2060 964 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
2061 964 : if (tupconv_map != NULL)
2062 314 : slot = execute_attr_map_slot(tupconv_map->attrMap,
2063 : slot,
2064 : mtstate->mt_root_tuple_slot);
2065 :
2066 : /* Tuple routing starts from the root table. */
2067 836 : context->cpUpdateReturningSlot =
2068 964 : ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
2069 : inserted_tuple, insert_destrel);
2070 :
2071 : /*
2072 : * Reset the transition state that may possibly have been written by
2073 : * INSERT.
2074 : */
2075 836 : if (mtstate->mt_transition_capture)
2076 54 : mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
2077 :
2078 : /* We're done moving. */
2079 836 : return true;
2080 : }
2081 :
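/*
 * Illustrative aside (not executor code): the delete-then-insert protocol
 * above skips the INSERT whenever the DELETE removed nothing, so two
 * concurrent movers can never turn one row into two. A toy analogue with
 * two arrays standing in for partitions (all names and bounds
 * hypothetical):
 *
 *     #include <stdbool.h>
 *     #include <stdio.h>
 *
 *     #define CAP 8
 *     typedef struct { int vals[CAP]; int n; } part_t;
 *
 *     static bool part_delete(part_t *p, int v)
 *     {
 *         for (int i = 0; i < p->n; i++)
 *             if (p->vals[i] == v)
 *             {
 *                 p->vals[i] = p->vals[--p->n];
 *                 return true;
 *             }
 *         return false;          // concurrently deleted / never present
 *     }
 *
 *     // Tuple routing "from the root": pick the partition by value.
 *     static void route_insert(part_t *lo, part_t *hi, int v)
 *     {
 *         part_t *dest = (v < 100) ? lo : hi;
 *         dest->vals[dest->n++] = v;
 *     }
 *
 *     static bool move_row(part_t *src, part_t *lo, part_t *hi,
 *                          int oldv, int newv)
 *     {
 *         if (!part_delete(src, oldv))
 *             return false;      // skip the INSERT: nothing was deleted
 *         route_insert(lo, hi, newv);
 *         return true;
 *     }
 *
 *     int main(void)
 *     {
 *         part_t lo = {{1, 42}, 2}, hi = {{0}, 0};
 *
 *         move_row(&lo, &lo, &hi, 42, 142);         // 42 moves to hi as 142
 *         printf("lo.n=%d hi.n=%d\n", lo.n, hi.n);  // lo.n=1 hi.n=1
 *         return 0;
 *     }
 */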
2082 : /*
2083 : * ExecUpdatePrologue -- subroutine for ExecUpdate
2084 : *
2085 : * Prepare executor state for UPDATE. This includes running BEFORE ROW
2086 : * triggers. We return false if one of them makes the update a no-op;
2087 : * otherwise, return true.
2088 : */
2089 : static bool
2090 324502 : ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2091 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2092 : TM_Result *result)
2093 : {
2094 324502 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2095 :
2096 324502 : if (result)
2097 2134 : *result = TM_Ok;
2098 :
2099 324502 : ExecMaterializeSlot(slot);
2100 :
2101 : /*
2102 : * Open the table's indexes, if we have not done so already, so that we
2103 : * can add new index entries for the updated tuple.
2104 : */
2105 324502 : if (resultRelationDesc->rd_rel->relhasindex &&
2106 233584 : resultRelInfo->ri_IndexRelationDescs == NULL)
2107 8724 : ExecOpenIndices(resultRelInfo, false);
2108 :
2109 : /* BEFORE ROW UPDATE triggers */
2110 324502 : if (resultRelInfo->ri_TrigDesc &&
2111 6254 : resultRelInfo->ri_TrigDesc->trig_update_before_row)
2112 : {
2113 : /* Flush any pending inserts, so rows are visible to the triggers */
2114 2560 : if (context->estate->es_insert_pending_result_relations != NIL)
2115 2 : ExecPendingInserts(context->estate);
2116 :
2117 2560 : return ExecBRUpdateTriggers(context->estate, context->epqstate,
2118 : resultRelInfo, tupleid, oldtuple, slot,
2119 : result, &context->tmfd);
2120 : }
2121 :
2122 321942 : return true;
2123 : }
2124 :
2125 : /*
2126 : * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
2127 : *
2128 : * Apply the final modifications to the tuple slot before the update.
2129 : * (This is split out because we also need it in the foreign-table code path.)
2130 : */
2131 : static void
2132 324232 : ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
2133 : TupleTableSlot *slot,
2134 : EState *estate)
2135 : {
2136 324232 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2137 :
2138 : /*
2139 : * Constraints and GENERATED expressions might reference the tableoid
2140 : * column, so (re-)initialize tts_tableOid before evaluating them.
2141 : */
2142 324232 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2143 :
2144 : /*
2145 : * Compute stored generated columns
2146 : */
2147 324232 : if (resultRelationDesc->rd_att->constr &&
2148 196708 : resultRelationDesc->rd_att->constr->has_generated_stored)
2149 258 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
2150 : CMD_UPDATE);
2151 324232 : }
2152 :
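/*
 * Illustrative aside (not executor code): stored GENERATED columns are
 * recomputed from the new column values just before the row is written,
 * which is why the slot is "prepared" above before table_tuple_update().
 * Toy analogue of a hypothetical column declared GENERATED ALWAYS AS
 * (w * h) STORED:
 *
 *     #include <stdio.h>
 *
 *     typedef struct { int w; int h; int area; } row_t;  // area is stored
 *
 *     static void compute_stored_generated(row_t *row)
 *     {
 *         row->area = row->w * row->h;   // redone on every INSERT/UPDATE
 *     }
 *
 *     int main(void)
 *     {
 *         row_t r = {3, 4, 0};
 *         compute_stored_generated(&r);  // INSERT path: area = 12
 *         r.w = 5;                       // UPDATE changes a base column
 *         compute_stored_generated(&r);  // prepare the slot: area = 20
 *         printf("area=%d\n", r.area);
 *         return 0;
 *     }
 */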
2153 : /*
2154 : * ExecUpdateAct -- subroutine for ExecUpdate
2155 : *
2156 : * Actually update the tuple, when operating on a plain table. If the
2157 : * table is a partition, and the command was called referencing an ancestor
2158 : * partitioned table, this routine migrates the resulting tuple to another
2159 : * partition.
2160 : *
2161 : * The caller is in charge of keeping indexes current as necessary. The
2162 : * caller is also in charge of doing EvalPlanQual if the tuple is found to
2163 : * be concurrently updated. However, in case of a cross-partition update,
2164 : * this routine does it.
2165 : */
2166 : static TM_Result
2167 324040 : ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2168 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2169 : bool canSetTag, UpdateContext *updateCxt)
2170 : {
2171 324040 : EState *estate = context->estate;
2172 324040 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2173 : bool partition_constraint_failed;
2174 : TM_Result result;
2175 :
2176 324040 : updateCxt->crossPartUpdate = false;
2177 :
2178 : /*
2179 : * If we move the tuple to a new partition, we loop back here to recompute
2180 : * GENERATED values (which are allowed to be different across partitions)
2181 : * and recheck any RLS policies and constraints. We do not fire any
2182 : * BEFORE triggers of the new partition, however.
2183 : */
2184 324046 : lreplace:
2185 : /* Fill in GENERATEd columns */
2186 324046 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2187 :
2188 : /* ensure slot is independent, consider e.g. EPQ */
2189 324046 : ExecMaterializeSlot(slot);
2190 :
2191 : /*
2192 : * If partition constraint fails, this row might get moved to another
2193 : * partition, in which case we should check the RLS CHECK policy just
2194 : * before inserting into the new partition, rather than doing it here.
2195 : * This is because a trigger on that partition might again change the row.
2196 : * So skip the WCO checks if the partition constraint fails.
2197 : */
2198 324046 : partition_constraint_failed =
2199 326760 : resultRelationDesc->rd_rel->relispartition &&
2200 2714 : !ExecPartitionCheck(resultRelInfo, slot, estate, false);
2201 :
2202 : /* Check any RLS UPDATE WITH CHECK policies */
2203 324046 : if (!partition_constraint_failed &&
2204 322956 : resultRelInfo->ri_WithCheckOptions != NIL)
2205 : {
2206 : /*
2207 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
2208 : * we are looking for at this point.
2209 : */
2210 492 : ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
2211 : resultRelInfo, slot, estate);
2212 : }
2213 :
2214 : /*
2215 : * If a partition check failed, try to move the row into the right
2216 : * partition.
2217 : */
2218 323992 : if (partition_constraint_failed)
2219 : {
2220 : TupleTableSlot *inserted_tuple,
2221 : *retry_slot;
2222 1090 : ResultRelInfo *insert_destrel = NULL;
2223 :
2224 : /*
2225 : * ExecCrossPartitionUpdate will first DELETE the row from the
2226 : * partition it's currently in and then insert it back into the root
2227 : * table, which will re-route it to the correct partition. However,
2228 : * if the tuple has been concurrently updated, a retry is needed.
2229 : */
2230 1090 : if (ExecCrossPartitionUpdate(context, resultRelInfo,
2231 : tupleid, oldtuple, slot,
2232 : canSetTag, updateCxt,
2233 : &result,
2234 : &retry_slot,
2235 : &inserted_tuple,
2236 : &insert_destrel))
2237 : {
2238 : /* success! */
2239 896 : updateCxt->crossPartUpdate = true;
2240 :
2241 : /*
2242 : * If the partitioned table being updated is referenced in foreign
2243 : * keys, queue up trigger events to check that none of them were
2244 : * violated. No special treatment is needed in
2245 : * non-cross-partition update situations, because the leaf
2246 : * partition's AR update triggers will take care of that. During
2247 : * cross-partition updates implemented as delete on the source
2248 : * partition followed by insert on the destination partition,
2249 : * AR-UPDATE triggers of the root table (that is, the table
2250 : * mentioned in the query) must be fired.
2251 : *
2252 : * A NULL insert_destrel means the move failed to occur, i.e. the
2253 : * update failed, so there is no need to do anything in that case.
2254 : */
2255 896 : if (insert_destrel &&
2256 808 : resultRelInfo->ri_TrigDesc &&
2257 362 : resultRelInfo->ri_TrigDesc->trig_update_after_row)
2258 300 : ExecCrossPartitionUpdateForeignKey(context,
2259 : resultRelInfo,
2260 : insert_destrel,
2261 : tupleid, slot,
2262 : inserted_tuple);
2263 :
2264 900 : return TM_Ok;
2265 : }
2266 :
2267 : /*
2268 : * No luck, a retry is needed. If running MERGE, we do not do so
2269 : * here; instead let it handle that on its own rules.
2270 : */
2271 16 : if (context->mtstate->operation == CMD_MERGE)
2272 10 : return result;
2273 :
2274 : /*
2275 : * ExecCrossPartitionUpdate installed an updated version of the new
2276 : * tuple in the retry slot; start over.
2277 : */
2278 6 : slot = retry_slot;
2279 6 : goto lreplace;
2280 : }
2281 :
2282 : /*
2283 : * Check the constraints of the tuple. We've already checked the
2284 : * partition constraint above; however, we must still ensure the tuple
2285 : * passes all other constraints, so we will call ExecConstraints() and
2286 : * have it validate all remaining checks.
2287 : */
2288 322902 : if (resultRelationDesc->rd_att->constr)
2289 196084 : ExecConstraints(resultRelInfo, slot, estate);
2290 :
2291 : /*
2292 : * replace the heap tuple
2293 : *
2294 : * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
2295 : * the row to be updated is visible to that snapshot, and throw a
2296 : * can't-serialize error if not. This is a special-case behavior needed
2297 : * for referential integrity updates in transaction-snapshot mode
2298 : * transactions.
2299 : */
2300 322828 : result = table_tuple_update(resultRelationDesc, tupleid, slot,
2301 : estate->es_output_cid,
2302 : estate->es_snapshot,
2303 : estate->es_crosscheck_snapshot,
2304 : true /* wait for commit */ ,
2305 : &context->tmfd, &updateCxt->lockmode,
2306 : &updateCxt->updateIndexes);
2307 :
2308 322804 : return result;
2309 : }
2310 :
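/*
 * Illustrative aside (not executor code): ExecUpdateAct() above checks the
 * partition constraint before the RLS WITH CHECK policies and the other
 * constraints, because a row that fails it will be moved and must be
 * checked against the destination partition's rules instead. A toy
 * decision order (hypothetical names and bounds):
 *
 *     #include <stdbool.h>
 *     #include <stdio.h>
 *
 *     static bool partition_accepts(int lo, int hi, int key)
 *     {
 *         return key >= lo && key < hi;
 *     }
 *
 *     static const char *update_act(int lo, int hi, int new_key)
 *     {
 *         if (!partition_accepts(lo, hi, new_key))
 *             return "cross-partition move";  // re-checked at destination
 *         // only now: RLS WITH CHECK policies, then table constraints
 *         return "in-place update";
 *     }
 *
 *     int main(void)
 *     {
 *         printf("%s\n", update_act(0, 100, 50));   // in-place update
 *         printf("%s\n", update_act(0, 100, 150));  // cross-partition move
 *         return 0;
 *     }
 */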
2311 : /*
2312 : * ExecUpdateEpilogue -- subroutine for ExecUpdate
2313 : *
2314 : * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2315 : * returns indicating that the tuple was updated.
2316 : */
2317 : static void
2318 322854 : ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
2319 : ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2320 : HeapTuple oldtuple, TupleTableSlot *slot)
2321 : {
2322 322854 : ModifyTableState *mtstate = context->mtstate;
2323 322854 : List *recheckIndexes = NIL;
2324 :
2325 : /* insert index entries for tuple if necessary */
2326 322854 : if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
2327 176924 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
2328 : slot, context->estate,
2329 : true, false,
2330 : NULL, NIL,
2331 176924 : (updateCxt->updateIndexes == TU_Summarizing));
2332 :
2333 : /* AFTER ROW UPDATE Triggers */
2334 322762 : ExecARUpdateTriggers(context->estate, resultRelInfo,
2335 : NULL, NULL,
2336 : tupleid, oldtuple, slot,
2337 : recheckIndexes,
2338 322762 : mtstate->operation == CMD_INSERT ?
2339 : mtstate->mt_oc_transition_capture :
2340 : mtstate->mt_transition_capture,
2341 : false);
2342 :
2343 322762 : list_free(recheckIndexes);
2344 :
2345 : /*
2346 : * Check any WITH CHECK OPTION constraints from parent views. We are
2347 : * required to do this after testing all constraints and uniqueness
2348 : * violations per the SQL spec, so we do it after actually updating the
2349 : * record in the heap and all indexes.
2350 : *
2351 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2352 : * are looking for at this point.
2353 : */
2354 322762 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2355 466 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2356 : slot, context->estate);
2357 322680 : }
2358 :
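/*
 * Illustrative aside (not executor code): the epilogue above inserts new
 * index entries only when the table AM reported that indexed columns
 * changed (updateCxt->updateIndexes != TU_None). A reduced analogue with
 * a single toy index on "id":
 *
 *     #include <stdio.h>
 *
 *     typedef enum { TOY_TU_NONE, TOY_TU_ALL } toy_tu;
 *
 *     typedef struct { int id; int payload; } row_t;  // index on id only
 *
 *     static toy_tu toy_update(row_t *r, int new_id, int new_payload)
 *     {
 *         toy_tu kind = (r->id != new_id) ? TOY_TU_ALL : TOY_TU_NONE;
 *
 *         r->id = new_id;
 *         r->payload = new_payload;
 *         return kind;
 *     }
 *
 *     int main(void)
 *     {
 *         row_t r = {1, 10};
 *
 *         if (toy_update(&r, 1, 11) != TOY_TU_NONE)  // payload only
 *             printf("index maintenance\n");          // not reached
 *         if (toy_update(&r, 2, 11) != TOY_TU_NONE)  // key changed
 *             printf("index maintenance\n");          // reached
 *         return 0;
 *     }
 */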
2359 : /*
2360 : * Queues up an update event using the target root partitioned table's
2361 : * trigger to check that a cross-partition update hasn't broken any foreign
2362 : * keys pointing into it.
2363 : */
2364 : static void
2365 300 : ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
2366 : ResultRelInfo *sourcePartInfo,
2367 : ResultRelInfo *destPartInfo,
2368 : ItemPointer tupleid,
2369 : TupleTableSlot *oldslot,
2370 : TupleTableSlot *newslot)
2371 : {
2372 : ListCell *lc;
2373 : ResultRelInfo *rootRelInfo;
2374 : List *ancestorRels;
2375 :
2376 300 : rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2377 300 : ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2378 :
2379 : /*
2380 : * For any foreign keys that point directly into non-root ancestors of
2381 : * the source partition, we could in theory fire an update event to
2382 : * enforce those constraints using their triggers, if we could tell that
2383 : * both the source and the destination partitions are under the same
2384 : * ancestor. But for now, we simply report an error that those cannot be enforced.
2385 : */
2386 654 : foreach(lc, ancestorRels)
2387 : {
2388 360 : ResultRelInfo *rInfo = lfirst(lc);
2389 360 : TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2390 360 : bool has_noncloned_fkey = false;
2391 :
2392 : /* Root ancestor's triggers will be processed. */
2393 360 : if (rInfo == rootRelInfo)
2394 294 : continue;
2395 :
2396 66 : if (trigdesc && trigdesc->trig_update_after_row)
2397 : {
2398 228 : for (int i = 0; i < trigdesc->numtriggers; i++)
2399 : {
2400 168 : Trigger *trig = &trigdesc->triggers[i];
2401 :
2402 174 : if (!trig->tgisclone &&
2403 6 : RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
2404 : {
2405 6 : has_noncloned_fkey = true;
2406 6 : break;
2407 : }
2408 : }
2409 : }
2410 :
2411 66 : if (has_noncloned_fkey)
2412 6 : ereport(ERROR,
2413 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2414 : errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2415 : errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2416 : RelationGetRelationName(rInfo->ri_RelationDesc),
2417 : RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
2418 : errhint("Consider defining the foreign key on table \"%s\".",
2419 : RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
2420 : }
2421 :
2422 : /* Perform the root table's triggers. */
2423 294 : ExecARUpdateTriggers(context->estate,
2424 : rootRelInfo, sourcePartInfo, destPartInfo,
2425 : tupleid, NULL, newslot, NIL, NULL, true);
2426 294 : }
2427 :
2428 : /* ----------------------------------------------------------------
2429 : * ExecUpdate
2430 : *
2431 : * note: we can't run UPDATE queries with transactions
2432 : * off because UPDATEs are actually INSERTs and our
2433 : * scan will mistakenly loop forever, updating the tuple
2434 : * it just inserted. This should be fixed, but until it
2435 : * is, we don't want to get stuck in an infinite loop
2436 : * which corrupts your database.
2437 : *
2438 : * When updating a table, tupleid identifies the tuple to update and
2439 : * oldtuple is NULL. When updating through a view INSTEAD OF trigger,
2440 : * oldtuple is passed to the triggers and identifies what to update, and
2441 : * tupleid is invalid. When updating a foreign table, tupleid is
2442 : * invalid; the FDW has to figure out which row to update using data from
2443 : * the planSlot. oldtuple is passed to foreign table triggers; it is
2444 : * NULL when the foreign table has no relevant triggers.
2445 : *
2446 : * oldSlot contains the old tuple value.
2447 : * slot contains the new tuple value to be stored.
2448 : * planSlot is the output of the ModifyTable's subplan; we use it
2449 : * to access values from other input tables (for RETURNING),
2450 : * row-ID junk columns, etc.
2451 : *
2452 : * Returns RETURNING result if any, otherwise NULL. On exit, if tupleid
2453 : * had identified the tuple to update, it will identify the tuple
2454 : * actually updated after EvalPlanQual.
2455 : * ----------------------------------------------------------------
2456 : */
2457 : static TupleTableSlot *
2458 322368 : ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2459 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *oldSlot,
2460 : TupleTableSlot *slot, bool canSetTag)
2461 : {
2462 322368 : EState *estate = context->estate;
2463 322368 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2464 322368 : UpdateContext updateCxt = {0};
2465 : TM_Result result;
2466 :
2467 : /*
2468 : * abort the operation if not running transactions
2469 : */
2470 322368 : if (IsBootstrapProcessingMode())
2471 0 : elog(ERROR, "cannot UPDATE during bootstrap");
2472 :
2473 : /*
2474 : * Prepare for the update. This includes BEFORE ROW triggers, so we're
2475 : * done if it says we are.
2476 : */
2477 322368 : if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
2478 132 : return NULL;
2479 :
2480 : /* INSTEAD OF ROW UPDATE Triggers */
2481 322212 : if (resultRelInfo->ri_TrigDesc &&
2482 5746 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2483 : {
2484 126 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2485 : oldtuple, slot))
2486 18 : return NULL; /* "do nothing" */
2487 : }
2488 322086 : else if (resultRelInfo->ri_FdwRoutine)
2489 : {
2490 : /* Fill in GENERATEd columns */
2491 186 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2492 :
2493 : /*
2494 : * update in foreign table: let the FDW do it
2495 : */
2496 186 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2497 : resultRelInfo,
2498 : slot,
2499 : context->planSlot);
2500 :
2501 186 : if (slot == NULL) /* "do nothing" */
2502 2 : return NULL;
2503 :
2504 : /*
2505 : * AFTER ROW Triggers or RETURNING expressions might reference the
2506 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
2507 : * them. (This covers the case where the FDW replaced the slot.)
2508 : */
2509 184 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2510 : }
2511 : else
2512 : {
2513 : ItemPointerData lockedtid;
2514 :
2515 : /*
2516 : * If we generate a new candidate tuple after EvalPlanQual testing, we
2517 : * must loop back here to try again. (We don't need to redo triggers,
2518 : * however. If there are any BEFORE triggers then trigger.c will have
2519 : * done table_tuple_lock to lock the correct tuple, so there's no need
2520 : * to do them again.)
2521 : */
2522 321900 : redo_act:
2523 322002 : lockedtid = *tupleid;
2524 322002 : result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2525 : canSetTag, &updateCxt);
2526 :
2527 : /*
2528 : * If ExecUpdateAct reports that a cross-partition update was done,
2529 : * then the RETURNING tuple (if any) has been projected and there's
2530 : * nothing else for us to do.
2531 : */
2532 321686 : if (updateCxt.crossPartUpdate)
2533 884 : return context->cpUpdateReturningSlot;
2534 :
2535 320930 : switch (result)
2536 : {
2537 84 : case TM_SelfModified:
2538 :
2539 : /*
2540 : * The target tuple was already updated or deleted by the
2541 : * current command, or by a later command in the current
2542 : * transaction. The former case is possible in a join UPDATE
2543 : * where multiple tuples join to the same target tuple. This
2544 : * is pretty questionable, but Postgres has always allowed it:
2545 : * we just execute the first update action and ignore
2546 : * additional update attempts.
2547 : *
2548 : * The latter case arises if the tuple is modified by a
2549 : * command in a BEFORE trigger, or perhaps by a command in a
2550 : * volatile function used in the query. In such situations we
2551 : * should not ignore the update, but it is equally unsafe to
2552 : * proceed. We don't want to discard the original UPDATE
2553 : * while keeping the triggered actions based on it; and we
2554 : * have no principled way to merge this update with the
2555 : * previous ones. So throwing an error is the only safe
2556 : * course.
2557 : *
2558 : * If a trigger actually intends this type of interaction, it
2559 : * can re-execute the UPDATE (assuming it can figure out how)
2560 : * and then return NULL to cancel the outer update.
2561 : */
2562 84 : if (context->tmfd.cmax != estate->es_output_cid)
2563 6 : ereport(ERROR,
2564 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2565 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2566 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2567 :
2568 : /* Else, already updated by self; nothing to do */
2569 78 : return NULL;
2570 :
2571 320684 : case TM_Ok:
2572 320684 : break;
2573 :
2574 154 : case TM_Updated:
2575 : {
2576 : TupleTableSlot *inputslot;
2577 : TupleTableSlot *epqslot;
2578 :
2579 154 : if (IsolationUsesXactSnapshot())
2580 4 : ereport(ERROR,
2581 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2582 : errmsg("could not serialize access due to concurrent update")));
2583 :
2584 : /*
2585 : * Already know that we're going to need to do EPQ, so
2586 : * fetch tuple directly into the right slot.
2587 : */
2588 150 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2589 : resultRelInfo->ri_RangeTableIndex);
2590 :
2591 150 : result = table_tuple_lock(resultRelationDesc, tupleid,
2592 : estate->es_snapshot,
2593 : inputslot, estate->es_output_cid,
2594 : updateCxt.lockmode, LockWaitBlock,
2595 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2596 : &context->tmfd);
2597 :
2598 146 : switch (result)
2599 : {
2600 136 : case TM_Ok:
2601 : Assert(context->tmfd.traversed);
2602 :
2603 136 : epqslot = EvalPlanQual(context->epqstate,
2604 : resultRelationDesc,
2605 : resultRelInfo->ri_RangeTableIndex,
2606 : inputslot);
2607 136 : if (TupIsNull(epqslot))
2608 : /* Tuple not passing quals anymore, exiting... */
2609 34 : return NULL;
2610 :
2611 : /* Make sure ri_oldTupleSlot is initialized. */
2612 102 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2613 0 : ExecInitUpdateProjection(context->mtstate,
2614 : resultRelInfo);
2615 :
2616 102 : if (resultRelInfo->ri_needLockTagTuple)
2617 : {
2618 2 : UnlockTuple(resultRelationDesc,
2619 : &lockedtid, InplaceUpdateTupleLock);
2620 2 : LockTuple(resultRelationDesc,
2621 : tupleid, InplaceUpdateTupleLock);
2622 : }
2623 :
2624 : /* Fetch the most recent version of old tuple. */
2625 102 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2626 102 : if (!table_tuple_fetch_row_version(resultRelationDesc,
2627 : tupleid,
2628 : SnapshotAny,
2629 : oldSlot))
2630 0 : elog(ERROR, "failed to fetch tuple being updated");
2631 102 : slot = ExecGetUpdateNewTuple(resultRelInfo,
2632 : epqslot, oldSlot);
2633 102 : goto redo_act;
2634 :
2635 2 : case TM_Deleted:
2636 : /* tuple already deleted; nothing to do */
2637 2 : return NULL;
2638 :
2639 8 : case TM_SelfModified:
2640 :
2641 : /*
2642 : * This can be reached when following an update
2643 : * chain from a tuple updated by another session,
2644 : * reaching a tuple that was already updated in
2645 : * this transaction. If previously modified by
2646 : * this command, ignore the redundant update,
2647 : * otherwise error out.
2648 : *
2649 : * See also TM_SelfModified response to
2650 : * table_tuple_update() above.
2651 : */
2652 8 : if (context->tmfd.cmax != estate->es_output_cid)
2653 2 : ereport(ERROR,
2654 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2655 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2656 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2657 6 : return NULL;
2658 :
2659 0 : default:
2660 : /* see table_tuple_lock call in ExecDelete() */
2661 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2662 : result);
2663 : return NULL;
2664 : }
2665 : }
2666 :
2667 : break;
2668 :
2669 8 : case TM_Deleted:
2670 8 : if (IsolationUsesXactSnapshot())
2671 0 : ereport(ERROR,
2672 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2673 : errmsg("could not serialize access due to concurrent delete")));
2674 : /* tuple already deleted; nothing to do */
2675 8 : return NULL;
2676 :
2677 0 : default:
2678 0 : elog(ERROR, "unrecognized table_tuple_update status: %u",
2679 : result);
2680 : return NULL;
2681 : }
2682 : }
2683 :
2684 320964 : if (canSetTag)
2685 320366 : (estate->es_processed)++;
2686 :
2687 320964 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2688 : slot);
2689 :
2690 : /* Process RETURNING if present */
2691 320802 : if (resultRelInfo->ri_projectReturning)
2692 2390 : return ExecProcessReturning(context, resultRelInfo, CMD_UPDATE,
2693 : oldSlot, slot, context->planSlot);
2694 :
2695 318412 : return NULL;
2696 : }
2697 :
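/*
 * Illustrative aside (not executor code): on TM_Updated, ExecUpdate()
 * above errors out under snapshot isolation, but follows the update chain
 * and retries under READ COMMITTED. Reduced to a decision function with
 * hypothetical names:
 *
 *     #include <stdbool.h>
 *     #include <stdio.h>
 *
 *     typedef enum
 *     {
 *         ACT_SERIALIZATION_ERROR,   // "could not serialize access..."
 *         ACT_RETRY_LATEST,          // redo_act with the EPQ tuple
 *         ACT_GIVE_UP                // row no longer passes the quals
 *     } action_t;
 *
 *     static action_t on_concurrent_update(bool xact_snapshot,
 *                                          bool still_qualifies)
 *     {
 *         if (xact_snapshot)
 *             return ACT_SERIALIZATION_ERROR;
 *         return still_qualifies ? ACT_RETRY_LATEST : ACT_GIVE_UP;
 *     }
 *
 *     int main(void)
 *     {
 *         printf("%d %d %d\n",
 *                on_concurrent_update(true, true),    // 0
 *                on_concurrent_update(false, true),   // 1
 *                on_concurrent_update(false, false)); // 2
 *         return 0;
 *     }
 */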
2698 : /*
2699 : * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
2700 : *
2701 : * Try to lock tuple for update as part of speculative insertion. If
2702 : * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
2703 : * (but still lock the row, even though it may not satisfy estate's
2704 : * snapshot).
2705 : *
2706 : * Returns true if we're done (with or without an update), or false if
2707 : * the caller must retry the INSERT from scratch.
2708 : */
2709 : static bool
2710 5206 : ExecOnConflictUpdate(ModifyTableContext *context,
2711 : ResultRelInfo *resultRelInfo,
2712 : ItemPointer conflictTid,
2713 : TupleTableSlot *excludedSlot,
2714 : bool canSetTag,
2715 : TupleTableSlot **returning)
2716 : {
2717 5206 : ModifyTableState *mtstate = context->mtstate;
2718 5206 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2719 5206 : Relation relation = resultRelInfo->ri_RelationDesc;
2720 5206 : ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
2721 5206 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
2722 : TM_FailureData tmfd;
2723 : LockTupleMode lockmode;
2724 : TM_Result test;
2725 : Datum xminDatum;
2726 : TransactionId xmin;
2727 : bool isnull;
2728 :
2729 : /*
2730 : * Parse analysis should have blocked ON CONFLICT for all system
2731 : * relations, which includes these. There's no fundamental obstacle to
2732 : * supporting this; we'd just need to handle LOCKTAG_TUPLE like the other
2733 : * ExecUpdate() caller.
2734 : */
2735 : Assert(!resultRelInfo->ri_needLockTagTuple);
2736 :
2737 : /* Determine lock mode to use */
2738 5206 : lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
2739 :
2740 : /*
2741 : * Lock the tuple for update. Don't follow the update chain when the
2742 : * tuple cannot be locked without doing so. A row locking conflict
2743 : * here means our previous conclusion that the tuple is conclusively
2744 : * committed is no longer true.
2745 : */
2746 5206 : test = table_tuple_lock(relation, conflictTid,
2747 5206 : context->estate->es_snapshot,
2748 5206 : existing, context->estate->es_output_cid,
2749 : lockmode, LockWaitBlock, 0,
2750 : &tmfd);
2751 5206 : switch (test)
2752 : {
2753 5182 : case TM_Ok:
2754 : /* success! */
2755 5182 : break;
2756 :
2757 24 : case TM_Invisible:
2758 :
2759 : /*
2760 : * This can occur when a just-inserted tuple is updated again in
2761 : * the same command, e.g., because multiple rows with the same
2762 : * conflicting key values are inserted.
2763 : *
2764 : * This is somewhat similar to the ExecUpdate() TM_SelfModified
2765 : * case. We do not want to proceed because it would lead to the
2766 : * same row being updated a second time in some unspecified order,
2767 : * and in contrast to plain UPDATEs there's no historical behavior
2768 : * to break.
2769 : *
2770 : * It is the user's responsibility to prevent this situation from
2771 : * occurring. These problems are why the SQL standard similarly
2772 : * specifies that for SQL MERGE, an exception must be raised in
2773 : * the event of an attempt to update the same row twice.
2774 : */
2775 24 : xminDatum = slot_getsysattr(existing,
2776 : MinTransactionIdAttributeNumber,
2777 : &isnull);
2778 : Assert(!isnull);
2779 24 : xmin = DatumGetTransactionId(xminDatum);
2780 :
2781 24 : if (TransactionIdIsCurrentTransactionId(xmin))
2782 24 : ereport(ERROR,
2783 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2784 : /* translator: %s is a SQL command name */
2785 : errmsg("%s command cannot affect row a second time",
2786 : "ON CONFLICT DO UPDATE"),
2787 : errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
2788 :
2789 : /* This shouldn't happen */
2790 0 : elog(ERROR, "attempted to lock invisible tuple");
2791 : break;
2792 :
2793 0 : case TM_SelfModified:
2794 :
2795 : /*
2796 : * This state should never be reached. As a dirty snapshot is used
2797 : * to find conflicting tuples, speculative insertion wouldn't have
2798 : * seen this row to conflict with.
2799 : */
2800 0 : elog(ERROR, "unexpected self-updated tuple");
2801 : break;
2802 :
2803 0 : case TM_Updated:
2804 0 : if (IsolationUsesXactSnapshot())
2805 0 : ereport(ERROR,
2806 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2807 : errmsg("could not serialize access due to concurrent update")));
2808 :
2809 : /*
2810 : * As long as we don't support INSERT ON CONFLICT DO UPDATE for a
2811 : * partitioned table, we shouldn't reach a case where the tuple to
2812 : * be locked has moved to another partition due to a concurrent
2813 : * update of the partition key.
2814 : */
2815 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2816 :
2817 : /*
2818 : * Tell caller to try again from the very start.
2819 : *
2820 : * It does not make sense to use the usual EvalPlanQual() style
2821 : * loop here, as the new version of the row might not conflict
2822 : * anymore, or the conflicting tuple has actually been deleted.
2823 : */
2824 0 : ExecClearTuple(existing);
2825 0 : return false;
2826 :
2827 0 : case TM_Deleted:
2828 0 : if (IsolationUsesXactSnapshot())
2829 0 : ereport(ERROR,
2830 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2831 : errmsg("could not serialize access due to concurrent delete")));
2832 :
2833 : /* see TM_Updated case */
2834 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2835 0 : ExecClearTuple(existing);
2836 0 : return false;
2837 :
2838 0 : default:
2839 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
2840 : }
2841 :
2842 : /* Success, the tuple is locked. */
2843 :
2844 : /*
2845 : * Verify that the tuple is visible to our MVCC snapshot if the current
2846 : * isolation level mandates that.
2847 : *
2848 : * It's not sufficient to rely on the check within ExecUpdate() as e.g.
2849 : * CONFLICT ... WHERE clause may prevent us from reaching that.
2850 : *
2851 : * This means we only ever continue when a new command in the current
2852 : * transaction could see the row, even though in READ COMMITTED mode the
2853 : * tuple will not be visible according to the current statement's
2854 : * snapshot. This is in line with the way UPDATE deals with newer tuple
2855 : * versions.
2856 : */
2857 5182 : ExecCheckTupleVisible(context->estate, relation, existing);
2858 :
2859 : /*
2860 : * Make tuple and any needed join variables available to ExecQual and
2861 : * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
2862 : * the target's existing tuple is installed in the scantuple. EXCLUDED
2863 : * has been made to reference INNER_VAR in setrefs.c, but there is no
2864 : * other redirection.
2865 : */
2866 5182 : econtext->ecxt_scantuple = existing;
2867 5182 : econtext->ecxt_innertuple = excludedSlot;
2868 5182 : econtext->ecxt_outertuple = NULL;
2869 :
2870 5182 : if (!ExecQual(onConflictSetWhere, econtext))
2871 : {
2872 32 : ExecClearTuple(existing); /* see return below */
2873 32 : InstrCountFiltered1(&mtstate->ps, 1);
2874 32 : return true; /* done with the tuple */
2875 : }
2876 :
2877 5150 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2878 : {
2879 : /*
2880 : * Check target's existing tuple against UPDATE-applicable USING
2881 : * security barrier quals (if any), enforced here as RLS checks/WCOs.
2882 : *
2883 : * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
2884 : * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
2885 : * but that's almost the extent of its special handling for ON
2886 : * CONFLICT DO UPDATE.
2887 : *
2888 : * The rewriter will also have associated UPDATE applicable straight
2889 : * RLS checks/WCOs for the benefit of the ExecUpdate() call that
2890 : * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
2891 : * kinds, so there is no danger of spurious over-enforcement in the
2892 : * INSERT or UPDATE path.
2893 : */
2894 60 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
2895 : existing,
2896 : mtstate->ps.state);
2897 : }
2898 :
2899 : /* Project the new tuple version */
2900 5126 : ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
2901 :
2902 : /*
2903 : * Note that it is possible that the target tuple has been modified in
2904 : * this session, after the above table_tuple_lock. We choose to not error
2905 : * out in that case, in line with ExecUpdate's treatment of similar cases.
2906 : * This can happen if an UPDATE is triggered from within ExecQual(),
2907 : * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
2908 : * wCTE in the ON CONFLICT's SET.
2909 : */
2910 :
2911 : /* Execute UPDATE with projection */
2912 10222 : *returning = ExecUpdate(context, resultRelInfo,
2913 : conflictTid, NULL, existing,
2914 5126 : resultRelInfo->ri_onConflict->oc_ProjSlot,
2915 : canSetTag);
2916 :
2917 : /*
2918 : * Clear out existing tuple, as there might not be another conflict among
2919 : * the next input rows. Don't want to hold resources till the end of the
2920 : * query. First though, make sure that the returning slot, if any, has a
2921 : * local copy of any OLD pass-by-reference values, if it refers to any OLD
2922 : * columns.
2923 : */
2924 5096 : if (*returning != NULL &&
2925 222 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
2926 6 : ExecMaterializeSlot(*returning);
2927 :
2928 5096 : ExecClearTuple(existing);
2929 :
2930 5096 : return true;
2931 : }
2932 :
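/*
 * Illustrative aside (not executor code): the TM_Invisible branch above
 * is what rejects two proposed rows with the same conflicting key within
 * one command. A toy upsert that enforces the same rule (all names
 * hypothetical):
 *
 *     #include <stdio.h>
 *     #include <stdlib.h>
 *     #include <string.h>
 *
 *     #define NKEYS 8
 *     static int table_vals[NKEYS];   // value per key; 0 = absent
 *     static int touched[NKEYS];      // modified by the current command?
 *
 *     static void upsert(int key, int value)
 *     {
 *         if (touched[key])
 *         {
 *             fprintf(stderr, "cannot affect row a second time\n");
 *             exit(1);
 *         }
 *         table_vals[key] = value;    // insert or DO UPDATE path
 *         touched[key] = 1;
 *     }
 *
 *     static void new_command(void)
 *     {
 *         memset(touched, 0, sizeof(touched));
 *     }
 *
 *     int main(void)
 *     {
 *         new_command();
 *         upsert(3, 10);              // command 1: plain insert
 *         new_command();
 *         upsert(3, 11);              // command 2: DO UPDATE, fine
 *         new_command();
 *         upsert(4, 1);
 *         upsert(4, 2);               // twice in one command: errors out
 *         return 0;
 *     }
 */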
2933 : /*
2934 : * Perform MERGE.
2935 : */
2936 : static TupleTableSlot *
2937 14532 : ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2938 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
2939 : {
2940 14532 : TupleTableSlot *rslot = NULL;
2941 : bool matched;
2942 :
2943 : /*-----
2944 : * If we are dealing with a WHEN MATCHED case, tupleid or oldtuple is
2945 : * valid, depending on whether the result relation is a table or a view.
2946 : * We execute the first action for which the additional WHEN MATCHED AND
2947 : * quals pass. If an action without quals is found, that action is
2948 : * executed.
2949 : *
2950 : * Similarly, in the WHEN NOT MATCHED BY SOURCE case, tupleid or oldtuple
2951 : * is valid, and we look at the given WHEN NOT MATCHED BY SOURCE actions
2952 : * in sequence until one passes. This is almost identical to the WHEN
2953 : * MATCHED case, and both cases are handled by ExecMergeMatched().
2954 : *
2955 : * Finally, in the WHEN NOT MATCHED [BY TARGET] case, both tupleid and
2956 : * oldtuple are invalid, and we look at the given WHEN NOT MATCHED [BY
2957 : * TARGET] actions in sequence until one passes.
2958 : *
2959 : * Things get interesting in case of concurrent update/delete of the
2960 : * target tuple. Such concurrent update/delete is detected while we are
2961 : * executing a WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action.
2962 : *
2963 : * A concurrent update can:
2964 : *
2965 : * 1. modify the target tuple so that the results from checking any
2966 : * additional quals attached to WHEN MATCHED or WHEN NOT MATCHED BY
2967 : * SOURCE actions potentially change, but the result from the join
2968 : * quals does not change.
2969 : *
2970 : * In this case, we are still dealing with the same kind of match
2971 : * (MATCHED or NOT MATCHED BY SOURCE). We recheck the same list of
2972 : * actions from the start and choose the first one that satisfies the
2973 : * new target tuple.
2974 : *
2975 : * 2. modify the target tuple in the WHEN MATCHED case so that the join
2976 : * quals no longer pass and hence the source and target tuples no
2977 : * longer match.
2978 : *
2979 : * In this case, we are now dealing with a NOT MATCHED case, and we
2980 : * process both WHEN NOT MATCHED BY SOURCE and WHEN NOT MATCHED [BY
2981 : * TARGET] actions. First ExecMergeMatched() processes the list of
2982 : * WHEN NOT MATCHED BY SOURCE actions in sequence until one passes,
2983 : * then ExecMergeNotMatched() processes any WHEN NOT MATCHED [BY
2984 : * TARGET] actions in sequence until one passes. Thus we may execute
2985 : * two actions; one of each kind.
2986 : *
2987 : * Thus we support concurrent updates that turn MATCHED candidate rows
2988 : * into NOT MATCHED rows. However, we do not attempt to support cases
2989 : * that would turn NOT MATCHED rows into MATCHED rows, or which would
2990 : * cause a target row to match a different source row.
2991 : *
2992 : * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED
2993 : * [BY TARGET].
2994 : *
2995 : * ExecMergeMatched() takes care of following the update chain and
2996 : * re-finding the qualifying WHEN MATCHED or WHEN NOT MATCHED BY SOURCE
2997 : * action, as long as the target tuple still exists. If the target tuple
2998 : * gets deleted or a concurrent update causes the join quals to fail, it
2999 : * returns a matched status of false and we call ExecMergeNotMatched().
3000 : * Given that ExecMergeMatched() always makes progress by following the
3001 : * update chain and we never switch from ExecMergeNotMatched() to
3002 : * ExecMergeMatched(), there is no risk of a livelock.
3003 : */
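	/*
	 * For illustration only (hypothetical tables "tgt" and "src", not part
	 * of this file), a MERGE exercising all three match kinds dispatched
	 * below:
	 *
	 *     MERGE INTO tgt t
	 *     USING src s ON t.id = s.id
	 *     WHEN MATCHED AND s.qty = 0 THEN DELETE
	 *     WHEN MATCHED THEN UPDATE SET qty = s.qty
	 *     WHEN NOT MATCHED BY SOURCE THEN DELETE
	 *     WHEN NOT MATCHED THEN INSERT (id, qty) VALUES (s.id, s.qty);
	 *
	 * Join rows that carry a target tuple take the ExecMergeMatched() path;
	 * source rows without one fall through to ExecMergeNotMatched().
	 */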
3004 14532 : matched = tupleid != NULL || oldtuple != NULL;
3005 14532 : if (matched)
3006 11884 : rslot = ExecMergeMatched(context, resultRelInfo, tupleid, oldtuple,
3007 : canSetTag, &matched);
3008 :
3009 : /*
3010 : * Deal with the NOT MATCHED case (either a NOT MATCHED tuple from the
3011 : * join, or a previously MATCHED tuple for which ExecMergeMatched() set
3012 : * "matched" to false, indicating that it no longer matches).
3013 : */
3014 14442 : if (!matched)
3015 : {
3016 : /*
3017 : * If a concurrent update turned a MATCHED case into a NOT MATCHED
3018 : * case, and we have both WHEN NOT MATCHED BY SOURCE and WHEN NOT
3019 : * MATCHED [BY TARGET] actions, and there is a RETURNING clause,
3020 : * ExecMergeMatched() may have already executed a WHEN NOT MATCHED BY
3021 : * SOURCE action, and computed the row to return. If so, we cannot
3022 : * execute a WHEN NOT MATCHED [BY TARGET] action now, so mark it as
3023 : * pending (to be processed on the next call to ExecModifyTable()).
3024 : * Otherwise, just process the action now.
3025 : */
3026 2664 : if (rslot == NULL)
3027 2662 : rslot = ExecMergeNotMatched(context, resultRelInfo, canSetTag);
3028 : else
3029 2 : context->mtstate->mt_merge_pending_not_matched = context->planSlot;
3030 : }
3031 :
3032 14388 : return rslot;
3033 : }
3034 :
3035 : /*
3036 : * Check and execute the first qualifying MATCHED or NOT MATCHED BY SOURCE
3037 : * action, depending on whether the join quals are satisfied. If the target
3038 : * relation is a table, the current target tuple is identified by tupleid.
3039 : * Otherwise, if the target relation is a view, oldtuple is the current target
3040 : * tuple from the view.
3041 : *
3042 : * We start from the first WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action
3043 : * and check if the WHEN quals pass, if any. If the WHEN quals for the first
3044 : * action do not pass, we check the second, then the third and so on. If we
3045 : * reach the end without finding a qualifying action, we return NULL.
3046 : * Otherwise, we execute the qualifying action and return its RETURNING
3047 : * result, if any, or NULL.
3048 : *
3049 : * On entry, "*matched" is assumed to be true. If a concurrent update or
3050 : * delete is detected that causes the join quals to no longer pass, we set it
3051 : * to false, indicating that the caller should process any NOT MATCHED [BY
3052 : * TARGET] actions.
3053 : *
3054 : * After a concurrent update, we restart from the first action to look for a
3055 : * new qualifying action to execute. If the join quals originally passed, and
3056 : * the concurrent update caused them to no longer pass, then we switch from
3057 : * the MATCHED to the NOT MATCHED BY SOURCE list of actions before restarting
3058 : * (and setting "*matched" to false). As a result we may execute a WHEN NOT
3059 : * MATCHED BY SOURCE action, and set "*matched" to false, causing the caller
3060 : * to also execute a WHEN NOT MATCHED [BY TARGET] action.
3061 : */
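/*
 * A schematic example of the first-qualifying-action rule (hypothetical
 * tables, not taken from this file): given
 *
 *     MERGE INTO tgt t USING src s ON t.id = s.id
 *     WHEN MATCHED AND t.qty <> s.qty THEN UPDATE SET qty = s.qty
 *     WHEN MATCHED THEN DO NOTHING;
 *
 * a matched row whose qty already equals s.qty fails the first action's
 * AND qual, so the scan falls through to the unconditional second action
 * and does nothing.
 */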
3062 : static TupleTableSlot *
3063 11884 : ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3064 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag,
3065 : bool *matched)
3066 : {
3067 11884 : ModifyTableState *mtstate = context->mtstate;
3068 11884 : List **mergeActions = resultRelInfo->ri_MergeActions;
3069 : ItemPointerData lockedtid;
3070 : List *actionStates;
3071 11884 : TupleTableSlot *newslot = NULL;
3072 11884 : TupleTableSlot *rslot = NULL;
3073 11884 : EState *estate = context->estate;
3074 11884 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3075 : bool isNull;
3076 11884 : EPQState *epqstate = &mtstate->mt_epqstate;
3077 : ListCell *l;
3078 :
3079 : /* Expect matched to be true on entry */
3080 : Assert(*matched);
3081 :
3082 : /*
3083 : * If there are no WHEN MATCHED or WHEN NOT MATCHED BY SOURCE actions, we
3084 : * are done.
3085 : */
3086 11884 : if (mergeActions[MERGE_WHEN_MATCHED] == NIL &&
3087 1200 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] == NIL)
3088 528 : return NULL;
3089 :
3090 : /*
3091 : * Make tuple and any needed join variables available to ExecQual and
3092 : * ExecProject. The target's existing tuple is installed in the scantuple.
3093 : * This target relation's slot is required only in the case of a MATCHED
3094 : * or NOT MATCHED BY SOURCE tuple and UPDATE/DELETE actions.
3095 : */
3096 11356 : econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
3097 11356 : econtext->ecxt_innertuple = context->planSlot;
3098 11356 : econtext->ecxt_outertuple = NULL;
3099 :
3100 : /*
3101 : * This routine is only invoked for matched target rows, so we should
3102 : * either have the tupleid of the target row, or an old tuple from the
3103 : * target wholerow junk attr.
3104 : */
3105 : Assert(tupleid != NULL || oldtuple != NULL);
3106 11356 : ItemPointerSetInvalid(&lockedtid);
3107 11356 : if (oldtuple != NULL)
3108 : {
3109 : Assert(!resultRelInfo->ri_needLockTagTuple);
3110 96 : ExecForceStoreHeapTuple(oldtuple, resultRelInfo->ri_oldTupleSlot,
3111 : false);
3112 : }
3113 : else
3114 : {
3115 11260 : if (resultRelInfo->ri_needLockTagTuple)
3116 : {
3117 : /*
3118 : * This locks even for CMD_DELETE, for CMD_NOTHING, and for tuples
3119 : * that don't match mas_whenqual. MERGE on system catalogs is a
3120 : * minor use case, so don't bother optimizing those.
3121 : */
3122 7330 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3123 : InplaceUpdateTupleLock);
3124 7330 : lockedtid = *tupleid;
3125 : }
3126 11260 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
3127 : tupleid,
3128 : SnapshotAny,
3129 : resultRelInfo->ri_oldTupleSlot))
3130 0 : elog(ERROR, "failed to fetch the target tuple");
3131 : }
3132 :
3133 : /*
3134 : * Test the join condition. If it's satisfied, perform a MATCHED action.
3135 : * Otherwise, perform a NOT MATCHED BY SOURCE action.
3136 : *
3137 : * Note that this join condition will be NULL if there are no NOT MATCHED
3138 : * BY SOURCE actions --- see transform_MERGE_to_join(). In that case, we
3139 : * need only consider MATCHED actions here.
3140 : */
3141 11356 : if (ExecQual(resultRelInfo->ri_MergeJoinCondition, econtext))
3142 11174 : actionStates = mergeActions[MERGE_WHEN_MATCHED];
3143 : else
3144 182 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3145 :
3146 11356 : lmerge_matched:
3147 :
3148 20202 : foreach(l, actionStates)
3149 : {
3150 11484 : MergeActionState *relaction = (MergeActionState *) lfirst(l);
3151 11484 : CmdType commandType = relaction->mas_action->commandType;
3152 : TM_Result result;
3153 11484 : UpdateContext updateCxt = {0};
3154 :
3155 : /*
3156 : * Test condition, if any.
3157 : *
3158 : * In the absence of any condition, we perform the action
3159 : * unconditionally (no need to check separately since ExecQual() will
3160 : * return true if there are no conditions to evaluate).
3161 : */
3162 11484 : if (!ExecQual(relaction->mas_whenqual, econtext))
3163 8782 : continue;
3164 :
3165 : /*
3166 : * Check if the existing target tuple meets the USING checks of
3167 : * UPDATE/DELETE RLS policies. If those checks fail, we throw an
3168 : * error.
3169 : *
3170 : * The WITH CHECK quals for UPDATE RLS policies are applied in
3171 : * ExecUpdateAct() and hence we need not do anything special to handle
3172 : * them.
3173 : *
3174 : * NOTE: We must do this after WHEN quals are evaluated, so that we
3175 : * check policies only when they matter.
3176 : */
3177 2702 : if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
3178 : {
3179 90 : ExecWithCheckOptions(commandType == CMD_UPDATE ?
3180 : WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
3181 : resultRelInfo,
3182 : resultRelInfo->ri_oldTupleSlot,
3183 90 : context->mtstate->ps.state);
3184 : }
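		/*
		 * A schematic illustration of the check above (hypothetical objects,
		 * assuming row-level security is enabled on the target): with
		 *
		 *     CREATE POLICY tgt_upd ON tgt
		 *         FOR UPDATE USING (owner = current_user);
		 *
		 * a WHEN MATCHED ... UPDATE action that fires for a row owned by
		 * someone else fails the policy's USING qual, and the MERGE errors
		 * out here rather than silently skipping the row.
		 */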
3185 :
3186 : /* Perform stated action */
3187 2678 : switch (commandType)
3188 : {
3189 2134 : case CMD_UPDATE:
3190 :
3191 : /*
3192 : * Project the output tuple, and use that to update the table.
3193 : * We don't need to filter out junk attributes, because the
3194 : * UPDATE action's targetlist doesn't have any.
3195 : */
3196 2134 : newslot = ExecProject(relaction->mas_proj);
3197 :
3198 2134 : mtstate->mt_merge_action = relaction;
3199 2134 : if (!ExecUpdatePrologue(context, resultRelInfo,
3200 : tupleid, NULL, newslot, &result))
3201 : {
3202 18 : if (result == TM_Ok)
3203 156 : goto out; /* "do nothing" */
3204 :
3205 12 : break; /* concurrent update/delete */
3206 : }
3207 :
3208 : /* INSTEAD OF ROW UPDATE Triggers */
3209 2116 : if (resultRelInfo->ri_TrigDesc &&
3210 334 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
3211 : {
3212 78 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
3213 : oldtuple, newslot))
3214 0 : goto out; /* "do nothing" */
3215 : }
3216 : else
3217 : {
3218 : /* checked ri_needLockTagTuple above */
3219 : Assert(oldtuple == NULL);
3220 :
3221 2038 : result = ExecUpdateAct(context, resultRelInfo, tupleid,
3222 : NULL, newslot, canSetTag,
3223 : &updateCxt);
3224 :
3225 : /*
3226 : * As in ExecUpdate(), if ExecUpdateAct() reports that a
3227 : * cross-partition update was done, then there's nothing
3228 : * else for us to do --- the UPDATE has been turned into a
3229 : * DELETE and an INSERT, and we must not perform any of
3230 : * the usual post-update tasks. Also, the RETURNING tuple
3231 : * (if any) has been projected, so we can just return
3232 : * that.
3233 : */
3234 2018 : if (updateCxt.crossPartUpdate)
3235 : {
3236 134 : mtstate->mt_merge_updated += 1;
3237 134 : rslot = context->cpUpdateReturningSlot;
3238 134 : goto out;
3239 : }
3240 : }
3241 :
3242 1962 : if (result == TM_Ok)
3243 : {
3244 1890 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
3245 : tupleid, NULL, newslot);
3246 1878 : mtstate->mt_merge_updated += 1;
3247 : }
3248 1950 : break;
3249 :
3250 514 : case CMD_DELETE:
3251 514 : mtstate->mt_merge_action = relaction;
3252 514 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
3253 : NULL, NULL, &result))
3254 : {
3255 12 : if (result == TM_Ok)
3256 6 : goto out; /* "do nothing" */
3257 :
3258 6 : break; /* concurrent update/delete */
3259 : }
3260 :
3261 : /* INSTEAD OF ROW DELETE Triggers */
3262 502 : if (resultRelInfo->ri_TrigDesc &&
3263 44 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
3264 : {
3265 6 : if (!ExecIRDeleteTriggers(estate, resultRelInfo,
3266 : oldtuple))
3267 0 : goto out; /* "do nothing" */
3268 : }
3269 : else
3270 : {
3271 : /* checked ri_needLockTagTuple above */
3272 : Assert(oldtuple == NULL);
3273 :
3274 496 : result = ExecDeleteAct(context, resultRelInfo, tupleid,
3275 : false);
3276 : }
3277 :
3278 502 : if (result == TM_Ok)
3279 : {
3280 484 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
3281 : false);
3282 484 : mtstate->mt_merge_deleted += 1;
3283 : }
3284 502 : break;
3285 :
3286 30 : case CMD_NOTHING:
3287 : /* Doing nothing is always OK */
3288 30 : result = TM_Ok;
3289 30 : break;
3290 :
3291 0 : default:
3292 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3293 : }
3294 :
3295 2500 : switch (result)
3296 : {
3297 2392 : case TM_Ok:
3298 : /* all good; perform final actions */
3299 2392 : if (canSetTag && commandType != CMD_NOTHING)
3300 2344 : (estate->es_processed)++;
3301 :
3302 2392 : break;
3303 :
3304 32 : case TM_SelfModified:
3305 :
3306 : /*
3307 : * The target tuple was already updated or deleted by the
3308 : * current command, or by a later command in the current
3309 : * transaction. The former case is explicitly disallowed by
3310 : * the SQL standard for MERGE, which insists that the MERGE
3311 : * join condition should not join a target row to more than
3312 : * one source row.
3313 : *
3314 : * The latter case arises if the tuple is modified by a
3315 : * command in a BEFORE trigger, or perhaps by a command in a
3316 : * volatile function used in the query. In such situations we
3317 : * should not ignore the MERGE action, but it is equally
3318 : * unsafe to proceed. We don't want to discard the original
3319 : * MERGE action while keeping the triggered actions based on
3320 : * it; and it would be no better to allow the original MERGE
3321 : * action while discarding the updates that it triggered. So
3322 : * throwing an error is the only safe course.
3323 : */
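				/*
				 * A schematic way to hit the cardinality complaint below
				 * (hypothetical tables): two source rows joining the same
				 * target row, e.g.
				 *
				 *     MERGE INTO tgt t
				 *     USING (VALUES (1, 'a'), (1, 'b')) AS s(id, val)
				 *         ON t.id = s.id
				 *     WHEN MATCHED THEN UPDATE SET val = s.val;
				 *
				 * The second source row finds the target tuple already
				 * updated by this command and fails with "MERGE command
				 * cannot affect row a second time".
				 */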
3324 32 : if (context->tmfd.cmax != estate->es_output_cid)
3325 12 : ereport(ERROR,
3326 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3327 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3328 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3329 :
3330 20 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3331 20 : ereport(ERROR,
3332 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3333 : /* translator: %s is a SQL command name */
3334 : errmsg("%s command cannot affect row a second time",
3335 : "MERGE"),
3336 : errhint("Ensure that not more than one source row matches any one target row.")));
3337 :
3338 : /* This shouldn't happen */
3339 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3340 : break;
3341 :
3342 10 : case TM_Deleted:
3343 10 : if (IsolationUsesXactSnapshot())
3344 0 : ereport(ERROR,
3345 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3346 : errmsg("could not serialize access due to concurrent delete")));
3347 :
3348 : /*
3349 : * If the tuple was already deleted, set matched to false to
3350 : * let caller handle it under NOT MATCHED [BY TARGET] clauses.
3351 : */
3352 10 : *matched = false;
3353 10 : goto out;
3354 :
3355 66 : case TM_Updated:
3356 : {
3357 : bool was_matched;
3358 : Relation resultRelationDesc;
3359 : TupleTableSlot *epqslot,
3360 : *inputslot;
3361 : LockTupleMode lockmode;
3362 :
3363 : /*
3364 : * The target tuple was concurrently updated by some other
3365 : * transaction. If we are currently processing a MATCHED
3366 : * action, use EvalPlanQual() with the new version of the
3367 : * tuple and recheck the join qual, to detect a change
3368 : * from the MATCHED to the NOT MATCHED cases. If we are
3369 : * already processing a NOT MATCHED BY SOURCE action, we
3370 : * skip this (cannot switch from NOT MATCHED BY SOURCE to
3371 : * MATCHED).
3372 : */
3373 66 : was_matched = relaction->mas_action->matchKind == MERGE_WHEN_MATCHED;
3374 66 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
3375 66 : lockmode = ExecUpdateLockMode(estate, resultRelInfo);
3376 :
3377 66 : if (was_matched)
3378 66 : inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
3379 : resultRelInfo->ri_RangeTableIndex);
3380 : else
3381 0 : inputslot = resultRelInfo->ri_oldTupleSlot;
3382 :
3383 66 : result = table_tuple_lock(resultRelationDesc, tupleid,
3384 : estate->es_snapshot,
3385 : inputslot, estate->es_output_cid,
3386 : lockmode, LockWaitBlock,
3387 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
3388 : &context->tmfd);
3389 66 : switch (result)
3390 : {
3391 64 : case TM_Ok:
3392 :
3393 : /*
3394 : * If the tuple was updated and migrated to
3395 : * another partition concurrently, the current
3396 : * MERGE implementation can't follow. There's
3397 : * probably a better way to handle this case, but
3398 : * it'd require recognizing the relation to which
3399 : * the tuple moved, and setting our current
3400 : * resultRelInfo to that.
3401 : */
3402 64 : if (ItemPointerIndicatesMovedPartitions(&context->tmfd.ctid))
3403 0 : ereport(ERROR,
3404 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3405 : errmsg("tuple to be merged was already moved to another partition due to concurrent update")));
3406 :
3407 : /*
3408 : * If this was a MATCHED case, use EvalPlanQual()
3409 : * to recheck the join condition.
3410 : */
3411 64 : if (was_matched)
3412 : {
3413 64 : epqslot = EvalPlanQual(epqstate,
3414 : resultRelationDesc,
3415 : resultRelInfo->ri_RangeTableIndex,
3416 : inputslot);
3417 :
3418 : /*
3419 : * If the subplan didn't return a tuple, then
3420 : * we must be dealing with an inner join for
3421 : * which the join condition no longer matches.
3422 : * This can only happen if there are no NOT
3423 : * MATCHED actions, and so there is nothing
3424 : * more to do.
3425 : */
3426 64 : if (TupIsNull(epqslot))
3427 0 : goto out;
3428 :
3429 : /*
3430 : * If we got a NULL ctid from the subplan, the
3431 : * join quals no longer pass and we switch to
3432 : * the NOT MATCHED BY SOURCE case.
3433 : */
3434 64 : (void) ExecGetJunkAttribute(epqslot,
3435 64 : resultRelInfo->ri_RowIdAttNo,
3436 : &isNull);
3437 64 : if (isNull)
3438 4 : *matched = false;
3439 :
3440 : /*
3441 : * Otherwise, recheck the join quals to see if
3442 : * we need to switch to the NOT MATCHED BY
3443 : * SOURCE case.
3444 : */
3445 64 : if (resultRelInfo->ri_needLockTagTuple)
3446 : {
3447 2 : if (ItemPointerIsValid(&lockedtid))
3448 2 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3449 : InplaceUpdateTupleLock);
3450 2 : LockTuple(resultRelInfo->ri_RelationDesc, &context->tmfd.ctid,
3451 : InplaceUpdateTupleLock);
3452 2 : lockedtid = context->tmfd.ctid;
3453 : }
3454 64 : if (!table_tuple_fetch_row_version(resultRelationDesc,
3455 : &context->tmfd.ctid,
3456 : SnapshotAny,
3457 : resultRelInfo->ri_oldTupleSlot))
3458 0 : elog(ERROR, "failed to fetch the target tuple");
3459 :
3460 64 : if (*matched)
3461 60 : *matched = ExecQual(resultRelInfo->ri_MergeJoinCondition,
3462 : econtext);
3463 :
3464 : /* Switch lists, if necessary */
3465 64 : if (!*matched)
3466 6 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3467 : }
3468 :
3469 : /*
3470 : * Loop back and process the MATCHED or NOT
3471 : * MATCHED BY SOURCE actions from the start.
3472 : */
3473 64 : goto lmerge_matched;
3474 :
3475 0 : case TM_Deleted:
3476 :
3477 : /*
3478 : * tuple already deleted; tell caller to run NOT
3479 : * MATCHED [BY TARGET] actions
3480 : */
3481 0 : *matched = false;
3482 0 : goto out;
3483 :
3484 2 : case TM_SelfModified:
3485 :
3486 : /*
3487 : * This can be reached when following an update
3488 : * chain from a tuple updated by another session,
3489 : * reaching a tuple that was already updated or
3490 : * deleted by the current command, or by a later
3491 : * command in the current transaction. As above,
3492 : * this should always be treated as an error.
3493 : */
3494 2 : if (context->tmfd.cmax != estate->es_output_cid)
3495 0 : ereport(ERROR,
3496 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3497 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3498 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3499 :
3500 2 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3501 2 : ereport(ERROR,
3502 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3503 : /* translator: %s is a SQL command name */
3504 : errmsg("%s command cannot affect row a second time",
3505 : "MERGE"),
3506 : errhint("Ensure that not more than one source row matches any one target row.")));
3507 :
3508 : /* This shouldn't happen */
3509 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3510 : goto out;
3511 :
3512 0 : default:
3513 : /* see table_tuple_lock call in ExecDelete() */
3514 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
3515 : result);
3516 : goto out;
3517 : }
3518 : }
3519 :
3520 0 : case TM_Invisible:
3521 : case TM_WouldBlock:
3522 : case TM_BeingModified:
3523 : /* these should not occur */
3524 0 : elog(ERROR, "unexpected tuple operation result: %d", result);
3525 : break;
3526 : }
3527 :
3528 : /* Process RETURNING if present */
3529 2392 : if (resultRelInfo->ri_projectReturning)
3530 : {
3531 408 : switch (commandType)
3532 : {
3533 174 : case CMD_UPDATE:
3534 174 : rslot = ExecProcessReturning(context,
3535 : resultRelInfo,
3536 : CMD_UPDATE,
3537 : resultRelInfo->ri_oldTupleSlot,
3538 : newslot,
3539 : context->planSlot);
3540 174 : break;
3541 :
3542 234 : case CMD_DELETE:
3543 234 : rslot = ExecProcessReturning(context,
3544 : resultRelInfo,
3545 : CMD_DELETE,
3546 : resultRelInfo->ri_oldTupleSlot,
3547 : NULL,
3548 : context->planSlot);
3549 234 : break;
3550 :
3551 0 : case CMD_NOTHING:
3552 0 : break;
3553 :
3554 0 : default:
3555 0 : elog(ERROR, "unrecognized commandType: %d",
3556 : (int) commandType);
3557 : }
3558 : }
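		/*
		 * For illustration (hypothetical tables): the RETURNING projections
		 * above serve queries such as
		 *
		 *     MERGE INTO tgt t USING src s ON t.id = s.id
		 *     WHEN MATCHED THEN UPDATE SET qty = s.qty
		 *     WHEN NOT MATCHED THEN INSERT (id, qty) VALUES (s.id, s.qty)
		 *     RETURNING merge_action(), t.*;
		 *
		 * where merge_action() reports which WHEN clause fired for each row.
		 */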
3559 :
3560 : /*
3561 : * We've activated one of the WHEN clauses, so we don't search
3562 : * further. This is required behaviour, not an optimization.
3563 : */
3564 2392 : break;
3565 : }
3566 :
3567 : /*
3568 : * Successfully executed an action or no qualifying action was found.
3569 : */
3570 11266 : out:
3571 11266 : if (ItemPointerIsValid(&lockedtid))
3572 7330 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3573 : InplaceUpdateTupleLock);
3574 11266 : return rslot;
3575 : }
3576 :
3577 : /*
3578 : * Execute the first qualifying NOT MATCHED [BY TARGET] action.
3579 : */
3580 : static TupleTableSlot *
3581 2664 : ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3582 : bool canSetTag)
3583 : {
3584 2664 : ModifyTableState *mtstate = context->mtstate;
3585 2664 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3586 : List *actionStates;
3587 2664 : TupleTableSlot *rslot = NULL;
3588 : ListCell *l;
3589 :
3590 : /*
3591 : * For INSERT actions, the root relation's merge action is OK since the
3592 : * INSERT's targetlist and the WHEN conditions can only refer to the
3593 : * source relation and hence it does not matter which result relation we
3594 : * work with.
3595 : *
3596 : * XXX does this mean that we can avoid creating copies of actionStates on
3597 : * partitioned tables, for not-matched actions?
3598 : */
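	/*
	 * Illustration (hypothetical names): a NOT MATCHED action such as
	 *
	 *     WHEN NOT MATCHED THEN INSERT (id, qty) VALUES (s.id, s.qty)
	 *
	 * may reference only source columns, never target ones, which is why
	 * evaluating the root relation's action states is safe regardless of
	 * which partition the row will eventually land in.
	 */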
3599 2664 : actionStates = resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET];
3600 :
3601 : /*
3602 : * Make source tuple available to ExecQual and ExecProject. We don't need
3603 : * the target tuple, since the WHEN quals and targetlist can't refer to
3604 : * the target columns.
3605 : */
3606 2664 : econtext->ecxt_scantuple = NULL;
3607 2664 : econtext->ecxt_innertuple = context->planSlot;
3608 2664 : econtext->ecxt_outertuple = NULL;
3609 :
3610 3534 : foreach(l, actionStates)
3611 : {
3612 2664 : MergeActionState *action = (MergeActionState *) lfirst(l);
3613 2664 : CmdType commandType = action->mas_action->commandType;
3614 : TupleTableSlot *newslot;
3615 :
3616 : /*
3617 : * Test condition, if any.
3618 : *
3619 : * In the absence of any condition, we perform the action
3620 : * unconditionally (no need to check separately since ExecQual() will
3621 : * return true if there are no conditions to evaluate).
3622 : */
3623 2664 : if (!ExecQual(action->mas_whenqual, econtext))
3624 870 : continue;
3625 :
3626 : /* Perform stated action */
3627 1794 : switch (commandType)
3628 : {
3629 1794 : case CMD_INSERT:
3630 :
3631 : /*
3632 : * Project the tuple. In case of a partitioned table, the
3633 : * projection was already built to use the root's descriptor,
3634 : * so we don't need to map the tuple here.
3635 : */
3636 1794 : newslot = ExecProject(action->mas_proj);
3637 1794 : mtstate->mt_merge_action = action;
3638 :
3639 1794 : rslot = ExecInsert(context, mtstate->rootResultRelInfo,
3640 : newslot, canSetTag, NULL, NULL);
3641 1740 : mtstate->mt_merge_inserted += 1;
3642 1740 : break;
3643 0 : case CMD_NOTHING:
3644 : /* Do nothing */
3645 0 : break;
3646 0 : default:
3647 0 : elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
3648 : }
3649 :
3650 : /*
3651 : * We've activated one of the WHEN clauses, so we don't search
3652 : * further. This is required behaviour, not an optimization.
3653 : */
3654 1740 : break;
3655 : }
3656 :
3657 2610 : return rslot;
3658 : }
3659 :
3660 : /*
3661 : * Initialize state for execution of MERGE.
3662 : */
3663 : void
3664 1504 : ExecInitMerge(ModifyTableState *mtstate, EState *estate)
3665 : {
3666 1504 : List *mergeActionLists = mtstate->mt_mergeActionLists;
3667 1504 : List *mergeJoinConditions = mtstate->mt_mergeJoinConditions;
3668 1504 : ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
3669 : ResultRelInfo *resultRelInfo;
3670 : ExprContext *econtext;
3671 : ListCell *lc;
3672 : int i;
3673 :
3674 1504 : if (mergeActionLists == NIL)
3675 0 : return;
3676 :
3677 1504 : mtstate->mt_merge_subcommands = 0;
3678 :
3679 1504 : if (mtstate->ps.ps_ExprContext == NULL)
3680 1254 : ExecAssignExprContext(estate, &mtstate->ps);
3681 1504 : econtext = mtstate->ps.ps_ExprContext;
3682 :
3683 : /*
3684 : * Create a MergeActionState for each action on the mergeActionList and
3685 : * add it to either a list of matched actions or not-matched actions.
3686 : *
3687 : * Similar logic appears in ExecInitPartitionInfo(), so if changing
3688 : * anything here, do so there too.
3689 : */
3690 1504 : i = 0;
3691 3240 : foreach(lc, mergeActionLists)
3692 : {
3693 1736 : List *mergeActionList = lfirst(lc);
3694 : Node *joinCondition;
3695 : TupleDesc relationDesc;
3696 : ListCell *l;
3697 :
3698 1736 : joinCondition = (Node *) list_nth(mergeJoinConditions, i);
3699 1736 : resultRelInfo = mtstate->resultRelInfo + i;
3700 1736 : i++;
3701 1736 : relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3702 :
3703 : /* initialize slots for MERGE fetches from this rel */
3704 1736 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3705 1736 : ExecInitMergeTupleSlots(mtstate, resultRelInfo);
3706 :
3707 : /* initialize state for join condition checking */
3708 1736 : resultRelInfo->ri_MergeJoinCondition =
3709 1736 : ExecInitQual((List *) joinCondition, &mtstate->ps);
3710 :
3711 4796 : foreach(l, mergeActionList)
3712 : {
3713 3060 : MergeAction *action = (MergeAction *) lfirst(l);
3714 : MergeActionState *action_state;
3715 : TupleTableSlot *tgtslot;
3716 : TupleDesc tgtdesc;
3717 :
3718 : /*
3719 : * Build action merge state for this rel. (For partitions,
3720 : * equivalent code exists in ExecInitPartitionInfo.)
3721 : */
3722 3060 : action_state = makeNode(MergeActionState);
3723 3060 : action_state->mas_action = action;
3724 3060 : action_state->mas_whenqual = ExecInitQual((List *) action->qual,
3725 : &mtstate->ps);
3726 :
3727 : /*
3728 : * We create three lists - one for each MergeMatchKind - and stick
3729 : * the MergeActionState into the appropriate list.
3730 : */
3731 6120 : resultRelInfo->ri_MergeActions[action->matchKind] =
3732 3060 : lappend(resultRelInfo->ri_MergeActions[action->matchKind],
3733 : action_state);
3734 :
3735 3060 : switch (action->commandType)
3736 : {
3737 1016 : case CMD_INSERT:
3738 1016 : ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
3739 : action->targetList);
3740 :
3741 : /*
3742 : * If the MERGE targets a partitioned table, any INSERT
3743 : * actions must be routed through it, not the child
3744 : * relations. Initialize the routing struct and the root
3745 : * table's "new" tuple slot for that, if not already done.
3746 : * The projection we prepare, for all relations, uses the
3747 : * root relation descriptor, and targets the plan's root
3748 : * slot. (This is consistent with the fact that we
3749 : * checked the plan output to match the root relation,
3750 : * above.)
3751 : */
3752 1016 : if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
3753 : RELKIND_PARTITIONED_TABLE)
3754 : {
3755 328 : if (mtstate->mt_partition_tuple_routing == NULL)
3756 : {
3757 : /*
3758 : * Initialize planstate for routing if not already
3759 : * done.
3760 : *
3761 : * Note that the slot is managed as a standalone
3762 : * slot belonging to ModifyTableState, so we pass
3763 : * NULL for the 2nd argument.
3764 : */
3765 154 : mtstate->mt_root_tuple_slot =
3766 154 : table_slot_create(rootRelInfo->ri_RelationDesc,
3767 : NULL);
3768 154 : mtstate->mt_partition_tuple_routing =
3769 154 : ExecSetupPartitionTupleRouting(estate,
3770 : rootRelInfo->ri_RelationDesc);
3771 : }
3772 328 : tgtslot = mtstate->mt_root_tuple_slot;
3773 328 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
3774 : }
3775 : else
3776 : {
3777 : /* not partitioned? use the stock relation and slot */
3778 688 : tgtslot = resultRelInfo->ri_newTupleSlot;
3779 688 : tgtdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3780 : }
3781 :
3782 1016 : action_state->mas_proj =
3783 1016 : ExecBuildProjectionInfo(action->targetList, econtext,
3784 : tgtslot,
3785 : &mtstate->ps,
3786 : tgtdesc);
3787 :
3788 1016 : mtstate->mt_merge_subcommands |= MERGE_INSERT;
3789 1016 : break;
3790 1528 : case CMD_UPDATE:
3791 1528 : action_state->mas_proj =
3792 1528 : ExecBuildUpdateProjection(action->targetList,
3793 : true,
3794 : action->updateColnos,
3795 : relationDesc,
3796 : econtext,
3797 : resultRelInfo->ri_newTupleSlot,
3798 : &mtstate->ps);
3799 1528 : mtstate->mt_merge_subcommands |= MERGE_UPDATE;
3800 1528 : break;
3801 452 : case CMD_DELETE:
3802 452 : mtstate->mt_merge_subcommands |= MERGE_DELETE;
3803 452 : break;
3804 64 : case CMD_NOTHING:
3805 64 : break;
3806 0 : default:
3807 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3808 : break;
3809 : }
3810 : }
3811 : }
3812 : }
3813 :
3814 : /*
3815 : * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
3816 : *
3817 : * We mark 'projectNewInfoValid' even though the projections themselves
3818 : * are not initialized here.
3819 : */
3820 : void
3821 1760 : ExecInitMergeTupleSlots(ModifyTableState *mtstate,
3822 : ResultRelInfo *resultRelInfo)
3823 : {
3824 1760 : EState *estate = mtstate->ps.state;
3825 :
3826 : Assert(!resultRelInfo->ri_projectNewInfoValid);
3827 :
3828 1760 : resultRelInfo->ri_oldTupleSlot =
3829 1760 : table_slot_create(resultRelInfo->ri_RelationDesc,
3830 : &estate->es_tupleTable);
3831 1760 : resultRelInfo->ri_newTupleSlot =
3832 1760 : table_slot_create(resultRelInfo->ri_RelationDesc,
3833 : &estate->es_tupleTable);
3834 1760 : resultRelInfo->ri_projectNewInfoValid = true;
3835 1760 : }
3836 :
3837 : /*
3838 : * Process BEFORE EACH STATEMENT triggers
3839 : */
3840 : static void
3841 119996 : fireBSTriggers(ModifyTableState *node)
3842 : {
3843 119996 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
3844 119996 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3845 :
3846 119996 : switch (node->operation)
3847 : {
3848 93118 : case CMD_INSERT:
3849 93118 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3850 93106 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3851 834 : ExecBSUpdateTriggers(node->ps.state,
3852 : resultRelInfo);
3853 93106 : break;
3854 13452 : case CMD_UPDATE:
3855 13452 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3856 13452 : break;
3857 12066 : case CMD_DELETE:
3858 12066 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3859 12066 : break;
3860 1360 : case CMD_MERGE:
3861 1360 : if (node->mt_merge_subcommands & MERGE_INSERT)
3862 746 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3863 1360 : if (node->mt_merge_subcommands & MERGE_UPDATE)
3864 936 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3865 1360 : if (node->mt_merge_subcommands & MERGE_DELETE)
3866 368 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3867 1360 : break;
3868 0 : default:
3869 0 : elog(ERROR, "unknown operation");
3870 : break;
3871 : }
3872 119984 : }
3873 :
3874 : /*
3875 : * Process AFTER EACH STATEMENT triggers
3876 : */
3877 : static void
3878 116744 : fireASTriggers(ModifyTableState *node)
3879 : {
3880 116744 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
3881 116744 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3882 :
3883 116744 : switch (node->operation)
3884 : {
3885 90824 : case CMD_INSERT:
3886 90824 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3887 732 : ExecASUpdateTriggers(node->ps.state,
3888 : resultRelInfo,
3889 732 : node->mt_oc_transition_capture);
3890 90824 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
3891 90824 : node->mt_transition_capture);
3892 90824 : break;
3893 12744 : case CMD_UPDATE:
3894 12744 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3895 12744 : node->mt_transition_capture);
3896 12744 : break;
3897 11960 : case CMD_DELETE:
3898 11960 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
3899 11960 : node->mt_transition_capture);
3900 11960 : break;
3901 1216 : case CMD_MERGE:
3902 1216 : if (node->mt_merge_subcommands & MERGE_DELETE)
3903 332 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
3904 332 : node->mt_transition_capture);
3905 1216 : if (node->mt_merge_subcommands & MERGE_UPDATE)
3906 840 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3907 840 : node->mt_transition_capture);
3908 1216 : if (node->mt_merge_subcommands & MERGE_INSERT)
3909 684 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
3910 684 : node->mt_transition_capture);
3911 1216 : break;
3912 0 : default:
3913 0 : elog(ERROR, "unknown operation");
3914 : break;
3915 : }
3916 116744 : }
3917 :
3918 : /*
3919 : * Set up the state needed for collecting transition tuples for AFTER
3920 : * triggers.
3921 : */
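/*
 * For illustration (hypothetical trigger, not from this file): transition
 * tuples are what statement-level triggers declared like
 *
 *     CREATE TRIGGER tgt_audit AFTER UPDATE ON tgt
 *         REFERENCING OLD TABLE AS old_rows NEW TABLE AS new_rows
 *         FOR EACH STATEMENT EXECUTE FUNCTION audit_fn();
 *
 * see as old_rows/new_rows; the capture state built here is what collects
 * them during the DML run.
 */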
3922 : static void
3923 120340 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
3924 : {
3925 120340 : ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
3926 120340 : ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
3927 :
3928 : /* Check for transition tables on the directly targeted relation. */
3929 120340 : mtstate->mt_transition_capture =
3930 120340 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3931 120340 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3932 : mtstate->operation);
3933 120340 : if (plan->operation == CMD_INSERT &&
3934 93120 : plan->onConflictAction == ONCONFLICT_UPDATE)
3935 834 : mtstate->mt_oc_transition_capture =
3936 834 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3937 834 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3938 : CMD_UPDATE);
3939 120340 : }
3940 :
3941 : /*
3942 : * ExecPrepareTupleRouting --- prepare for routing one tuple
3943 : *
3944 : * Determine the partition in which the tuple in slot is to be inserted,
3945 : * and return its ResultRelInfo in *partRelInfo. The return value is
3946 : * a slot holding the tuple of the partition rowtype.
3947 : *
3948 : * This also sets the transition table information in mtstate based on the
3949 : * selected partition.
3950 : */
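/*
 * A condensed sketch of the intended calling pattern (variable names are
 * illustrative; compare ExecInsert() earlier in this file):
 *
 *     if (proute)
 *     {
 *         ResultRelInfo *partRelInfo;
 *
 *         slot = ExecPrepareTupleRouting(mtstate, estate, proute,
 *                                        resultRelInfo, slot,
 *                                        &partRelInfo);
 *         resultRelInfo = partRelInfo;
 *     }
 *
 * On return, 'slot' holds the tuple in the chosen partition's rowtype and
 * all further per-row work targets partRelInfo.
 */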
3951 : static TupleTableSlot *
3952 728272 : ExecPrepareTupleRouting(ModifyTableState *mtstate,
3953 : EState *estate,
3954 : PartitionTupleRouting *proute,
3955 : ResultRelInfo *targetRelInfo,
3956 : TupleTableSlot *slot,
3957 : ResultRelInfo **partRelInfo)
3958 : {
3959 : ResultRelInfo *partrel;
3960 : TupleConversionMap *map;
3961 :
3962 : /*
 3963 : 	 * Look up the target partition's ResultRelInfo. If ExecFindPartition
 3964 : 	 * does not find a valid partition for the tuple in 'slot', an error is
3965 : * raised. An error may also be raised if the found partition is not a
3966 : * valid target for INSERTs. This is required since a partitioned table
3967 : * UPDATE to another partition becomes a DELETE+INSERT.
3968 : */
3969 728272 : partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
3970 :
3971 : /*
3972 : * If we're capturing transition tuples, we might need to convert from the
 3973 : 	 * partition rowtype to the root partitioned table's rowtype. But if there
3974 : * are no BEFORE triggers on the partition that could change the tuple, we
3975 : * can just remember the original unconverted tuple to avoid a needless
3976 : * round trip conversion.
3977 : */
3978 728068 : if (mtstate->mt_transition_capture != NULL)
3979 : {
3980 : bool has_before_insert_row_trig;
3981 :
3982 192 : has_before_insert_row_trig = (partrel->ri_TrigDesc &&
3983 42 : partrel->ri_TrigDesc->trig_insert_before_row);
3984 :
3985 150 : mtstate->mt_transition_capture->tcs_original_insert_tuple =
3986 150 : !has_before_insert_row_trig ? slot : NULL;
3987 : }
3988 :
3989 : /*
3990 : * Convert the tuple, if necessary.
3991 : */
3992 728068 : map = ExecGetRootToChildMap(partrel, estate);
3993 728068 : if (map != NULL)
3994 : {
3995 68460 : TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
3996 :
3997 68460 : slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
3998 : }
3999 :
4000 728068 : *partRelInfo = partrel;
4001 728068 : return slot;
4002 : }
4003 :
4004 : /* ----------------------------------------------------------------
4005 : * ExecModifyTable
4006 : *
4007 : * Perform table modifications as required, and return RETURNING results
4008 : * if needed.
4009 : * ----------------------------------------------------------------
4010 : */
4011 : static TupleTableSlot *
4012 128778 : ExecModifyTable(PlanState *pstate)
4013 : {
4014 128778 : ModifyTableState *node = castNode(ModifyTableState, pstate);
4015 : ModifyTableContext context;
4016 128778 : EState *estate = node->ps.state;
4017 128778 : CmdType operation = node->operation;
4018 : ResultRelInfo *resultRelInfo;
4019 : PlanState *subplanstate;
4020 : TupleTableSlot *slot;
4021 : TupleTableSlot *oldSlot;
4022 : ItemPointerData tuple_ctid;
4023 : HeapTupleData oldtupdata;
4024 : HeapTuple oldtuple;
4025 : ItemPointer tupleid;
4026 : bool tuplock;
4027 :
4028 128778 : CHECK_FOR_INTERRUPTS();
4029 :
4030 : /*
4031 : * This should NOT get called during EvalPlanQual; we should have passed a
 4032 : 	 * subplan tree to EvalPlanQual instead. Use a runtime test, not just an
 4033 : 	 * Assert, because this condition is easy to miss in testing. (Note:
4034 : * although ModifyTable should not get executed within an EvalPlanQual
4035 : * operation, we do have to allow it to be initialized and shut down in
4036 : * case it is within a CTE subplan. Hence this test must be here, not in
4037 : * ExecInitModifyTable.)
4038 : */
4039 128778 : if (estate->es_epq_active != NULL)
4040 0 : elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
4041 :
4042 : /*
4043 : * If we've already completed processing, don't try to do more. We need
4044 : * this test because ExecPostprocessPlan might call us an extra time, and
4045 : * our subplan's nodes aren't necessarily robust against being called
4046 : * extra times.
4047 : */
4048 128778 : if (node->mt_done)
4049 794 : return NULL;
4050 :
4051 : /*
4052 : * On first call, fire BEFORE STATEMENT triggers before proceeding.
4053 : */
4054 127984 : if (node->fireBSTriggers)
4055 : {
4056 119996 : fireBSTriggers(node);
4057 119984 : node->fireBSTriggers = false;
4058 : }
4059 :
4060 : /* Preload local variables */
4061 127972 : resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
4062 127972 : subplanstate = outerPlanState(node);
4063 :
4064 : /* Set global context */
4065 127972 : context.mtstate = node;
4066 127972 : context.epqstate = &node->mt_epqstate;
4067 127972 : context.estate = estate;
4068 :
4069 : /*
4070 : * Fetch rows from subplan, and execute the required table modification
4071 : * for each row.
4072 : */
4073 : for (;;)
4074 : {
4075 : /*
4076 : * Reset the per-output-tuple exprcontext. This is needed because
4077 : * triggers expect to use that context as workspace. It's a bit ugly
4078 : * to do this below the top level of the plan, however. We might need
4079 : * to rethink this later.
4080 : */
4081 14255888 : ResetPerTupleExprContext(estate);
4082 :
4083 : /*
4084 : * Reset per-tuple memory context used for processing on conflict and
4085 : * returning clauses, to free any expression evaluation storage
4086 : * allocated in the previous cycle.
4087 : */
4088 14255888 : if (pstate->ps_ExprContext)
4089 351102 : ResetExprContext(pstate->ps_ExprContext);
4090 :
4091 : /*
4092 : * If there is a pending MERGE ... WHEN NOT MATCHED [BY TARGET] action
4093 : * to execute, do so now --- see the comments in ExecMerge().
4094 : */
4095 14255888 : if (node->mt_merge_pending_not_matched != NULL)
4096 : {
4097 2 : context.planSlot = node->mt_merge_pending_not_matched;
4098 2 : context.cpDeletedSlot = NULL;
4099 :
4100 2 : slot = ExecMergeNotMatched(&context, node->resultRelInfo,
4101 2 : node->canSetTag);
4102 :
4103 : /* Clear the pending action */
4104 2 : node->mt_merge_pending_not_matched = NULL;
4105 :
4106 : /*
4107 : * If we got a RETURNING result, return it to the caller. We'll
4108 : * continue the work on next call.
4109 : */
4110 2 : if (slot)
4111 2 : return slot;
4112 :
4113 0 : continue; /* continue with the next tuple */
4114 : }
4115 :
4116 : /* Fetch the next row from subplan */
4117 14255886 : context.planSlot = ExecProcNode(subplanstate);
4118 14255468 : context.cpDeletedSlot = NULL;
4119 :
4120 : /* No more tuples to process? */
4121 14255468 : if (TupIsNull(context.planSlot))
4122 : break;
4123 :
4124 : /*
4125 : * When there are multiple result relations, each tuple contains a
4126 : * junk column that gives the OID of the rel from which it came.
4127 : * Extract it and select the correct result relation.
4128 : */
4129 14138724 : if (AttributeNumberIsValid(node->mt_resultOidAttno))
4130 : {
4131 : Datum datum;
4132 : bool isNull;
4133 : Oid resultoid;
4134 :
4135 5118 : datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
4136 : &isNull);
4137 5118 : if (isNull)
4138 : {
4139 : /*
4140 : * For commands other than MERGE, any tuples having InvalidOid
4141 : * for tableoid are errors. For MERGE, we may need to handle
 4142 : 			 * them via WHEN NOT MATCHED clauses, if any, so do that.
4143 : *
4144 : * Note that we use the node's toplevel resultRelInfo, not any
4145 : * specific partition's.
4146 : */
4147 484 : if (operation == CMD_MERGE)
4148 : {
4149 484 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4150 :
4151 484 : slot = ExecMerge(&context, node->resultRelInfo,
4152 484 : NULL, NULL, node->canSetTag);
4153 :
4154 : /*
4155 : * If we got a RETURNING result, return it to the caller.
4156 : * We'll continue the work on next call.
4157 : */
4158 478 : if (slot)
4159 32 : return slot;
4160 :
4161 446 : continue; /* continue with the next tuple */
4162 : }
4163 :
4164 0 : elog(ERROR, "tableoid is NULL");
4165 : }
4166 4634 : resultoid = DatumGetObjectId(datum);
4167 :
4168 : /* If it's not the same as last time, we need to locate the rel */
4169 4634 : if (resultoid != node->mt_lastResultOid)
4170 3160 : resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
4171 : false, true);
4172 : }
4173 :
4174 : /*
4175 : * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
4176 : * here is compute the RETURNING expressions.
4177 : */
4178 14138240 : if (resultRelInfo->ri_usesFdwDirectModify)
4179 : {
4180 : Assert(resultRelInfo->ri_projectReturning);
4181 :
4182 : /*
4183 : * A scan slot containing the data that was actually inserted,
4184 : * updated or deleted has already been made available to
4185 : * ExecProcessReturning by IterateDirectModify, so no need to
4186 : * provide it here. The individual old and new slots are not
4187 : * needed, since direct-modify is disabled if the RETURNING list
4188 : * refers to OLD/NEW values.
4189 : */
4190 : Assert((resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD) == 0 &&
4191 : (resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW) == 0);
4192 :
4193 694 : slot = ExecProcessReturning(&context, resultRelInfo, operation,
4194 : NULL, NULL, context.planSlot);
4195 :
4196 694 : return slot;
4197 : }
4198 :
4199 14137546 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4200 14137546 : slot = context.planSlot;
4201 :
4202 14137546 : tupleid = NULL;
4203 14137546 : oldtuple = NULL;
4204 :
4205 : /*
4206 : * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
4207 : * to be updated/deleted/merged. For a heap relation, that's a TID;
4208 : * otherwise we may have a wholerow junk attr that carries the old
4209 : * tuple in toto. Keep this in step with the part of
4210 : * ExecInitModifyTable that sets up ri_RowIdAttNo.
4211 : */
4212 14137546 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4213 : operation == CMD_MERGE)
4214 : {
4215 : char relkind;
4216 : Datum datum;
4217 : bool isNull;
4218 :
4219 1975764 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4220 1975764 : if (relkind == RELKIND_RELATION ||
4221 562 : relkind == RELKIND_MATVIEW ||
4222 : relkind == RELKIND_PARTITIONED_TABLE)
4223 : {
4224 : /* ri_RowIdAttNo refers to a ctid attribute */
4225 : Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo));
4226 1975208 : datum = ExecGetJunkAttribute(slot,
4227 1975208 : resultRelInfo->ri_RowIdAttNo,
4228 : &isNull);
4229 :
4230 : /*
4231 : * For commands other than MERGE, any tuples having a null row
4232 : * identifier are errors. For MERGE, we may need to handle
 4233 : 				 * them via WHEN NOT MATCHED clauses, if any, so do that.
4234 : *
4235 : * Note that we use the node's toplevel resultRelInfo, not any
4236 : * specific partition's.
4237 : */
4238 1975208 : if (isNull)
4239 : {
4240 2116 : if (operation == CMD_MERGE)
4241 : {
4242 2116 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4243 :
4244 2116 : slot = ExecMerge(&context, node->resultRelInfo,
4245 2116 : NULL, NULL, node->canSetTag);
4246 :
4247 : /*
4248 : * If we got a RETURNING result, return it to the
4249 : * caller. We'll continue the work on next call.
4250 : */
4251 2074 : if (slot)
4252 120 : return slot;
4253 :
4254 1996 : continue; /* continue with the next tuple */
4255 : }
4256 :
4257 0 : elog(ERROR, "ctid is NULL");
4258 : }
4259 :
4260 1973092 : tupleid = (ItemPointer) DatumGetPointer(datum);
4261 1973092 : tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
4262 1973092 : tupleid = &tuple_ctid;
4263 : }
4264 :
4265 : /*
4266 : * Use the wholerow attribute, when available, to reconstruct the
4267 : * old relation tuple. The old tuple serves one or both of two
4268 : * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
4269 : * provides values for any unchanged columns for the NEW tuple of
4270 : * an UPDATE, because the subplan does not produce all the columns
4271 : * of the target table.
4272 : *
4273 : * Note that the wholerow attribute does not carry system columns,
4274 : * so foreign table triggers miss seeing those, except that we
4275 : * know enough here to set t_tableOid. Quite separately from
4276 : * this, the FDW may fetch its own junk attrs to identify the row.
4277 : *
4278 : * Other relevant relkinds, currently limited to views, always
4279 : * have a wholerow attribute.
4280 : */
4281 556 : else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4282 : {
4283 530 : datum = ExecGetJunkAttribute(slot,
4284 530 : resultRelInfo->ri_RowIdAttNo,
4285 : &isNull);
4286 :
4287 : /*
4288 : * For commands other than MERGE, any tuples having a null row
4289 : * identifier are errors. For MERGE, we may need to handle
 4290 : 				 * them via WHEN NOT MATCHED clauses, if any, so do that.
4291 : *
4292 : * Note that we use the node's toplevel resultRelInfo, not any
4293 : * specific partition's.
4294 : */
4295 530 : if (isNull)
4296 : {
4297 48 : if (operation == CMD_MERGE)
4298 : {
4299 48 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4300 :
4301 48 : slot = ExecMerge(&context, node->resultRelInfo,
4302 48 : NULL, NULL, node->canSetTag);
4303 :
4304 : /*
4305 : * If we got a RETURNING result, return it to the
4306 : * caller. We'll continue the work on next call.
4307 : */
4308 42 : if (slot)
4309 12 : return slot;
4310 :
4311 30 : continue; /* continue with the next tuple */
4312 : }
4313 :
4314 0 : elog(ERROR, "wholerow is NULL");
4315 : }
4316 :
4317 482 : oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
4318 482 : oldtupdata.t_len =
4319 482 : HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
4320 482 : ItemPointerSetInvalid(&(oldtupdata.t_self));
4321 : /* Historically, view triggers see invalid t_tableOid. */
4322 482 : oldtupdata.t_tableOid =
4323 482 : (relkind == RELKIND_VIEW) ? InvalidOid :
4324 206 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
4325 :
4326 482 : oldtuple = &oldtupdata;
4327 : }
4328 : else
4329 : {
4330 : /* Only foreign tables are allowed to omit a row-ID attr */
4331 : Assert(relkind == RELKIND_FOREIGN_TABLE);
4332 : }
4333 : }
4334 :
4335 14135382 : switch (operation)
4336 : {
4337 12161782 : case CMD_INSERT:
4338 : /* Initialize projection info if first time for this table */
4339 12161782 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4340 91944 : ExecInitInsertProjection(node, resultRelInfo);
4341 12161782 : slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
4342 12161782 : slot = ExecInsert(&context, resultRelInfo, slot,
4343 12161782 : node->canSetTag, NULL, NULL);
4344 12159680 : break;
4345 :
4346 317242 : case CMD_UPDATE:
4347 317242 : tuplock = false;
4348 :
4349 : /* Initialize projection info if first time for this table */
4350 317242 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4351 13162 : ExecInitUpdateProjection(node, resultRelInfo);
4352 :
4353 : /*
4354 : * Make the new tuple by combining plan's output tuple with
4355 : * the old tuple being updated.
4356 : */
4357 317242 : oldSlot = resultRelInfo->ri_oldTupleSlot;
4358 317242 : if (oldtuple != NULL)
4359 : {
4360 : Assert(!resultRelInfo->ri_needLockTagTuple);
4361 : /* Use the wholerow junk attr as the old tuple. */
4362 314 : ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
4363 : }
4364 : else
4365 : {
4366 : /* Fetch the most recent version of old tuple. */
4367 316928 : Relation relation = resultRelInfo->ri_RelationDesc;
4368 :
4369 316928 : if (resultRelInfo->ri_needLockTagTuple)
4370 : {
4371 24754 : LockTuple(relation, tupleid, InplaceUpdateTupleLock);
4372 24754 : tuplock = true;
4373 : }
4374 316928 : if (!table_tuple_fetch_row_version(relation, tupleid,
4375 : SnapshotAny,
4376 : oldSlot))
4377 0 : elog(ERROR, "failed to fetch tuple being updated");
4378 : }
4379 317242 : slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
4380 : oldSlot);
4381 :
4382 : /* Now apply the update. */
4383 317242 : slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
4384 317242 : oldSlot, slot, node->canSetTag);
4385 316742 : if (tuplock)
4386 24754 : UnlockTuple(resultRelInfo->ri_RelationDesc, tupleid,
4387 : InplaceUpdateTupleLock);
4388 316742 : break;
4389 :
4390 1644474 : case CMD_DELETE:
4391 1644474 : slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
4392 1644474 : true, false, node->canSetTag, NULL, NULL, NULL);
4393 1644410 : break;
4394 :
4395 11884 : case CMD_MERGE:
4396 11884 : slot = ExecMerge(&context, resultRelInfo, tupleid, oldtuple,
4397 11884 : node->canSetTag);
4398 11794 : break;
4399 :
4400 0 : default:
4401 0 : elog(ERROR, "unknown operation");
4402 : break;
4403 : }
4404 :
4405 : /*
4406 : * If we got a RETURNING result, return it to caller. We'll continue
4407 : * the work on next call.
4408 : */
4409 14132626 : if (slot)
4410 7152 : return slot;
4411 : }
4412 :
4413 : /*
4414 : * Insert remaining tuples for batch insert.
4415 : */
4416 116744 : if (estate->es_insert_pending_result_relations != NIL)
4417 24 : ExecPendingInserts(estate);
4418 :
4419 : /*
4420 : * We're done, but fire AFTER STATEMENT triggers before exiting.
4421 : */
4422 116744 : fireASTriggers(node);
4423 :
4424 116744 : node->mt_done = true;
4425 :
4426 116744 : return NULL;
4427 : }
4428 :
4429 : /*
4430 : * ExecLookupResultRelByOid
4431 : * If the table with given OID is among the result relations to be
4432 : * updated by the given ModifyTable node, return its ResultRelInfo.
4433 : *
4434 : * If not found, return NULL if missing_ok, else raise error.
4435 : *
4436 : * If update_cache is true, then upon successful lookup, update the node's
4437 : * one-element cache. ONLY ExecModifyTable may pass true for this.
4438 : */
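/*
 * Typical use, in sketch form (cf. the tableoid-based lookup in
 * ExecModifyTable() above):
 *
 *     resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
 *                                              false, true);
 *
 * With missing_ok = false a failed lookup raises an error, so callers that
 * can tolerate an absent rel pass missing_ok = true and test for NULL.
 */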
4439 : ResultRelInfo *
4440 11898 : ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid,
4441 : bool missing_ok, bool update_cache)
4442 : {
4443 11898 : if (node->mt_resultOidHash)
4444 : {
4445 : /* Use the pre-built hash table to locate the rel */
4446 : MTTargetRelLookup *mtlookup;
4447 :
4448 : mtlookup = (MTTargetRelLookup *)
4449 0 : hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
4450 0 : if (mtlookup)
4451 : {
4452 0 : if (update_cache)
4453 : {
4454 0 : node->mt_lastResultOid = resultoid;
4455 0 : node->mt_lastResultIndex = mtlookup->relationIndex;
4456 : }
4457 0 : return node->resultRelInfo + mtlookup->relationIndex;
4458 : }
4459 : }
4460 : else
4461 : {
4462 : /* With few target rels, just search the ResultRelInfo array */
4463 22678 : for (int ndx = 0; ndx < node->mt_nrels; ndx++)
4464 : {
4465 14440 : ResultRelInfo *rInfo = node->resultRelInfo + ndx;
4466 :
4467 14440 : if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
4468 : {
4469 3660 : if (update_cache)
4470 : {
4471 3160 : node->mt_lastResultOid = resultoid;
4472 3160 : node->mt_lastResultIndex = ndx;
4473 : }
4474 3660 : return rInfo;
4475 : }
4476 : }
4477 : }
4478 :
4479 8238 : if (!missing_ok)
4480 0 : elog(ERROR, "incorrect result relation OID %u", resultoid);
4481 8238 : return NULL;
4482 : }
4483 :
4484 : /* ----------------------------------------------------------------
4485 : * ExecInitModifyTable
4486 : * ----------------------------------------------------------------
4487 : */
4488 : ModifyTableState *
4489 121360 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
4490 : {
4491 : ModifyTableState *mtstate;
4492 121360 : Plan *subplan = outerPlan(node);
4493 121360 : CmdType operation = node->operation;
4494 121360 : int total_nrels = list_length(node->resultRelations);
4495 : int nrels;
4496 121360 : List *resultRelations = NIL;
4497 121360 : List *withCheckOptionLists = NIL;
4498 121360 : List *returningLists = NIL;
4499 121360 : List *updateColnosLists = NIL;
4500 121360 : List *mergeActionLists = NIL;
4501 121360 : List *mergeJoinConditions = NIL;
4502 : ResultRelInfo *resultRelInfo;
4503 : List *arowmarks;
4504 : ListCell *l;
4505 : int i;
4506 : Relation rel;
4507 :
4508 : /* check for unsupported flags */
4509 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
4510 :
4511 : /*
4512 : * Only consider unpruned relations for initializing their ResultRelInfo
4513 : * struct and other fields such as withCheckOptions, etc.
4514 : *
4515 : * Note: We must avoid pruning every result relation. This is important
4516 : * for MERGE, since even if every result relation is pruned from the
4517 : * subplan, there might still be NOT MATCHED rows, for which there may be
4518 : * INSERT actions to perform. To allow these actions to be found, at
4519 : * least one result relation must be kept. Also, when inserting into a
4520 : * partitioned table, ExecInitPartitionInfo() needs a ResultRelInfo struct
4521 : * as a reference for building the ResultRelInfo of the target partition.
4522 : * In either case, it doesn't matter which result relation is kept, so we
 4523 : 	 * just keep the first one, if all others have been pruned. See also
4524 : * ExecDoInitialPruning(), which ensures that this first result relation
4525 : * has been locked.
4526 : */
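	/*
	 * A schematic example of why one relation must survive (hypothetical
	 * partitioned table "parted_tgt", assuming its partitions can all be
	 * pruned by the join condition):
	 *
	 *     MERGE INTO parted_tgt t
	 *     USING src s ON t.id = s.id AND t.id < 0
	 *     WHEN NOT MATCHED THEN INSERT (id) VALUES (s.id);
	 *
	 * Even with every partition pruned from the subplan, each source row is
	 * NOT MATCHED, so the INSERT action still has to be able to run.
	 */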
4527 121360 : i = 0;
4528 245214 : foreach(l, node->resultRelations)
4529 : {
4530 123854 : Index rti = lfirst_int(l);
4531 : bool keep_rel;
4532 :
4533 123854 : keep_rel = bms_is_member(rti, estate->es_unpruned_relids);
4534 123854 : if (!keep_rel && i == total_nrels - 1 && resultRelations == NIL)
4535 : {
4536 : /* all result relations pruned; keep the first one */
4537 48 : keep_rel = true;
4538 48 : rti = linitial_int(node->resultRelations);
4539 48 : i = 0;
4540 : }
4541 :
4542 123854 : if (keep_rel)
4543 : {
4544 123770 : resultRelations = lappend_int(resultRelations, rti);
4545 123770 : if (node->withCheckOptionLists)
4546 : {
4547 1412 : List *withCheckOptions = list_nth_node(List,
4548 : node->withCheckOptionLists,
4549 : i);
4550 :
4551 1412 : withCheckOptionLists = lappend(withCheckOptionLists, withCheckOptions);
4552 : }
4553 123770 : if (node->returningLists)
4554 : {
4555 4982 : List *returningList = list_nth_node(List,
4556 : node->returningLists,
4557 : i);
4558 :
4559 4982 : returningLists = lappend(returningLists, returningList);
4560 : }
4561 123770 : if (node->updateColnosLists)
4562 : {
4563 15868 : List *updateColnosList = list_nth(node->updateColnosLists, i);
4564 :
4565 15868 : updateColnosLists = lappend(updateColnosLists, updateColnosList);
4566 : }
4567 123770 : if (node->mergeActionLists)
4568 : {
4569 1736 : List *mergeActionList = list_nth(node->mergeActionLists, i);
4570 :
4571 1736 : mergeActionLists = lappend(mergeActionLists, mergeActionList);
4572 : }
4573 123770 : if (node->mergeJoinConditions)
4574 : {
4575 1736 : List *mergeJoinCondition = list_nth(node->mergeJoinConditions, i);
4576 :
4577 1736 : mergeJoinConditions = lappend(mergeJoinConditions, mergeJoinCondition);
4578 : }
4579 : }
4580 123854 : i++;
4581 : }
4582 121360 : nrels = list_length(resultRelations);
4583 : Assert(nrels > 0);
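       :
       :     /*
       :      * Illustrative example (not in the original source): the all-pruned
       :      * case handled above can arise for a statement such as
       :      *
       :      *      MERGE INTO part_tab t USING src s ON t.id = s.id
       :      *      WHEN NOT MATCHED THEN INSERT VALUES (s.id, s.val);
       :      *
       :      * where "part_tab" and "src" are hypothetical and initial pruning
       :      * removes every target partition from the join.  The join can still
       :      * emit NOT MATCHED rows, so the first result relation is kept both
       :      * for the INSERT actions and as the reference relation for
       :      * ExecInitPartitionInfo().
       :      */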
4584 :
4585 : /*
4586 : * create state structure
4587 : */
4588 121360 : mtstate = makeNode(ModifyTableState);
4589 121360 : mtstate->ps.plan = (Plan *) node;
4590 121360 : mtstate->ps.state = estate;
4591 121360 : mtstate->ps.ExecProcNode = ExecModifyTable;
4592 :
4593 121360 : mtstate->operation = operation;
4594 121360 : mtstate->canSetTag = node->canSetTag;
4595 121360 : mtstate->mt_done = false;
4596 :
4597 121360 : mtstate->mt_nrels = nrels;
4598 121360 : mtstate->resultRelInfo = (ResultRelInfo *)
4599 121360 : palloc(nrels * sizeof(ResultRelInfo));
4600 :
4601 121360 : mtstate->mt_merge_pending_not_matched = NULL;
4602 121360 : mtstate->mt_merge_inserted = 0;
4603 121360 : mtstate->mt_merge_updated = 0;
4604 121360 : mtstate->mt_merge_deleted = 0;
4605 121360 : mtstate->mt_updateColnosLists = updateColnosLists;
4606 121360 : mtstate->mt_mergeActionLists = mergeActionLists;
4607 121360 : mtstate->mt_mergeJoinConditions = mergeJoinConditions;
4608 :
4609 : /*----------
4610 : * Resolve the target relation. This is the same as:
4611 : *
4612 : * - the relation for which we will fire FOR STATEMENT triggers,
4613 : * - the relation into whose tuple format all captured transition tuples
4614 : * must be converted, and
4615 : * - the root partitioned table used for tuple routing.
4616 : *
4617 : * If it's a partitioned or inherited table, the root partition or
4618 : * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
4619 : * given explicitly in node->rootRelation. Otherwise, the target relation
4620 : * is the sole relation in the node->resultRelations list and, since it can
4621 : * never be pruned, also in the resultRelations list constructed above.
4622 : *----------
4623 : */
4624 121360 : if (node->rootRelation > 0)
4625 : {
4626 : Assert(bms_is_member(node->rootRelation, estate->es_unpruned_relids));
4627 2804 : mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
4628 2804 : ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
4629 : node->rootRelation);
4630 : }
4631 : else
4632 : {
4633 : Assert(list_length(node->resultRelations) == 1);
4634 : Assert(list_length(resultRelations) == 1);
4635 118556 : mtstate->rootResultRelInfo = mtstate->resultRelInfo;
4636 118556 : ExecInitResultRelation(estate, mtstate->resultRelInfo,
4637 118556 : linitial_int(resultRelations));
4638 : }
4639 :
4640 : /* set up epqstate with dummy subplan data for the moment */
4641 121360 : EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
4642 : node->epqParam, resultRelations);
4643 121360 : mtstate->fireBSTriggers = true;
4644 :
4645 : /*
4646 : * Build state for collecting transition tuples. This requires having a
4647 : * valid trigger query context, so skip it in explain-only mode.
4648 : */
4649 121360 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
4650 120340 : ExecSetupTransitionCaptureState(mtstate, estate);
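       :
       :     /*
       :      * Illustrative example (not in the original source): transition
       :      * capture comes into play for triggers declared with a REFERENCING
       :      * clause, e.g.
       :      *
       :      *      CREATE TRIGGER t_audit AFTER UPDATE ON t
       :      *          REFERENCING OLD TABLE AS old_rows NEW TABLE AS new_rows
       :      *          FOR EACH STATEMENT EXECUTE FUNCTION audit_fn();
       :      *
       :      * where "t" and "audit_fn" are hypothetical.  Captured transition
       :      * tuples are converted to the format of the target relation
       :      * resolved below.
       :      */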
4651 :
4652 : /*
4653 : * Open all the result relations and initialize the ResultRelInfo structs.
4654 : * (But the root relation was initialized above, if it's part of the array.)
4655 : * We must do this before initializing the subplan, because direct-modify
4656 : * FDWs expect their ResultRelInfos to be available.
4657 : */
4658 121360 : resultRelInfo = mtstate->resultRelInfo;
4659 121360 : i = 0;
4660 244806 : foreach(l, resultRelations)
4661 : {
4662 123764 : Index resultRelation = lfirst_int(l);
4663 123764 : List *mergeActions = NIL;
4664 :
4665 123764 : if (mergeActionLists)
4666 1736 : mergeActions = list_nth(mergeActionLists, i);
4667 :
4668 123764 : if (resultRelInfo != mtstate->rootResultRelInfo)
4669 : {
4670 5208 : ExecInitResultRelation(estate, resultRelInfo, resultRelation);
4671 :
4672 : /*
4673 : * For child result relations, store the root result relation
4674 : * pointer. We do so for the convenience of places that want to
4675 : * look at the query's original target relation but don't have the
4676 : * mtstate handy.
4677 : */
4678 5208 : resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
4679 : }
4680 :
4681 : /* Initialize the usesFdwDirectModify flag */
4682 123764 : resultRelInfo->ri_usesFdwDirectModify =
4683 123764 : bms_is_member(i, node->fdwDirectModifyPlans);
4684 :
4685 : /*
4686 : * Verify result relation is a valid target for the current operation
4687 : */
4688 123764 : CheckValidResultRel(resultRelInfo, operation, mergeActions);
4689 :
4690 123446 : resultRelInfo++;
4691 123446 : i++;
4692 : }
4693 :
4694 : /*
4695 : * Now we may initialize the subplan.
4696 : */
4697 121042 : outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
4698 :
4699 : /*
4700 : * Do additional per-result-relation initialization.
4701 : */
4702 244454 : for (i = 0; i < nrels; i++)
4703 : {
4704 123412 : resultRelInfo = &mtstate->resultRelInfo[i];
4705 :
4706 : /* Let FDWs init themselves for foreign-table result rels */
4707 123412 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4708 123204 : resultRelInfo->ri_FdwRoutine != NULL &&
4709 322 : resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
4710 : {
4711 322 : List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
4712 :
4713 322 : resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
4714 : resultRelInfo,
4715 : fdw_private,
4716 : i,
4717 : eflags);
4718 : }
4719 :
4720 : /*
4721 : * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
4722 : * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
4723 : * tables, the FDW might have created additional junk attr(s), but
4724 : * those are no concern of ours.
4725 : */
4726 123412 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4727 : operation == CMD_MERGE)
4728 : {
4729 : char relkind;
4730 :
4731 30020 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4732 30020 : if (relkind == RELKIND_RELATION ||
4733 680 : relkind == RELKIND_MATVIEW ||
4734 : relkind == RELKIND_PARTITIONED_TABLE)
4735 : {
4736 29376 : resultRelInfo->ri_RowIdAttNo =
4737 29376 : ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
4738 29376 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4739 0 : elog(ERROR, "could not find junk ctid column");
4740 : }
4741 644 : else if (relkind == RELKIND_FOREIGN_TABLE)
4742 : {
4743 : /*
4744 : * We don't support MERGE with foreign tables for now. (It's
4745 : * problematic because the implementation uses CTID.)
4746 : */
4747 : Assert(operation != CMD_MERGE);
4748 :
4749 : /*
4750 : * When there is a row-level trigger, there should be a
4751 : * wholerow attribute. We also require it to be present in
4752 : * UPDATE and MERGE, so we can get the values of unchanged
4753 : * columns.
4754 : */
4755 356 : resultRelInfo->ri_RowIdAttNo =
4756 356 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4757 : "wholerow");
4758 356 : if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
4759 202 : !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4760 0 : elog(ERROR, "could not find junk wholerow column");
4761 : }
4762 : else
4763 : {
4764 : /* Other valid target relkinds must provide wholerow */
4765 288 : resultRelInfo->ri_RowIdAttNo =
4766 288 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4767 : "wholerow");
4768 288 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4769 0 : elog(ERROR, "could not find junk wholerow column");
4770 : }
4771 : }
4772 : }
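       :
       :     /*
       :      * Illustrative example (not in the original source): the junk
       :      * columns located above are visible in EXPLAIN output.  For a plain
       :      * table one might see something like
       :      *
       :      *      EXPLAIN (VERBOSE, COSTS OFF) UPDATE t SET x = x + 1;
       :      *          Update on public.t
       :      *            ->  Seq Scan on public.t
       :      *                  Output: (t.x + 1), t.ctid
       :      *
       :      * (table "t" hypothetical), while a foreign-table target would
       :      * carry a "wholerow" junk column instead of "ctid".
       :      */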
4773 :
4774 : /*
4775 : * If this is an inherited update/delete/merge, there will be a junk
4776 : * attribute named "tableoid" present in the subplan's targetlist. It
4777 : * will be used to identify the result relation for a given tuple to be
4778 : * updated/deleted/merged.
4779 : */
4780 121042 : mtstate->mt_resultOidAttno =
4781 121042 : ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
4782 : Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || total_nrels == 1);
4783 121042 : mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
4784 121042 : mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
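       :
       :     /*
       :      * Rough sketch (not in the original source) of how this attribute
       :      * is consumed per tuple at run time:
       :      *
       :      *      datum = ExecGetJunkAttribute(slot, node->mt_resultOidAttno,
       :      *                                   &isNull);
       :      *      resultoid = DatumGetObjectId(datum);
       :      *      if (resultoid != node->mt_lastResultOid)
       :      *          resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
       :      *                                                   false, true);
       :      *
       :      * which is why mt_lastResultOid is set to InvalidOid here, forcing
       :      * a lookup on the first tuple.
       :      */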
4785 :
4786 : /* Get the root target relation */
4787 121042 : rel = mtstate->rootResultRelInfo->ri_RelationDesc;
4788 :
4789 : /*
4790 : * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
4791 : * or MERGE might need this too, but only if it actually moves tuples
4792 : * between partitions; in that case setup is done by
4793 : * ExecCrossPartitionUpdate.
4794 : */
4795 121042 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
4796 : operation == CMD_INSERT)
4797 5868 : mtstate->mt_partition_tuple_routing =
4798 5868 : ExecSetupPartitionTupleRouting(estate, rel);
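       :
       :     /*
       :      * Illustrative example (not in the original source): this routing
       :      * state is what lets a single statement such as
       :      *
       :      *      INSERT INTO parent VALUES (5), (500);
       :      *
       :      * (partitioned table "parent" hypothetical) deliver each row to a
       :      * different leaf partition.
       :      */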
4799 :
4800 : /*
4801 : * Initialize any WITH CHECK OPTION constraints if needed.
4802 : */
4803 121042 : resultRelInfo = mtstate->resultRelInfo;
4804 122454 : foreach(l, withCheckOptionLists)
4805 : {
4806 1412 : List *wcoList = (List *) lfirst(l);
4807 1412 : List *wcoExprs = NIL;
4808 : ListCell *ll;
4809 :
4810 3824 : foreach(ll, wcoList)
4811 : {
4812 2412 : WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
4813 2412 : ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
4814 : &mtstate->ps);
4815 :
4816 2412 : wcoExprs = lappend(wcoExprs, wcoExpr);
4817 : }
4818 :
4819 1412 : resultRelInfo->ri_WithCheckOptions = wcoList;
4820 1412 : resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
4821 1412 : resultRelInfo++;
4822 : }
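       :
       :     /*
       :      * Illustrative example (not in the original source): such check
       :      * options typically come from an auto-updatable view, e.g.
       :      *
       :      *      CREATE VIEW pos_v AS SELECT * FROM t WHERE x > 0
       :      *          WITH CHECK OPTION;
       :      *
       :      * (view and table hypothetical).  An INSERT or UPDATE routed
       :      * through "pos_v" must then satisfy "x > 0", which is enforced via
       :      * the wcoExprs initialized above.
       :      */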
4823 :
4824 : /*
4825 : * Initialize RETURNING projections if needed.
4826 : */
4827 121042 : if (returningLists)
4828 : {
4829 : TupleTableSlot *slot;
4830 : ExprContext *econtext;
4831 :
4832 : /*
4833 : * Initialize result tuple slot and assign its rowtype using the first
4834 : * RETURNING list. We assume the rest will look the same.
4835 : */
4836 4636 : mtstate->ps.plan->targetlist = (List *) linitial(returningLists);
4837 :
4838 : /* Set up a slot for the output of the RETURNING projection(s) */
4839 4636 : ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
4840 4636 : slot = mtstate->ps.ps_ResultTupleSlot;
4841 :
4842 : /* Need an econtext too */
4843 4636 : if (mtstate->ps.ps_ExprContext == NULL)
4844 4636 : ExecAssignExprContext(estate, &mtstate->ps);
4845 4636 : econtext = mtstate->ps.ps_ExprContext;
4846 :
4847 : /*
4848 : * Build a projection for each result rel.
4849 : */
4850 4636 : resultRelInfo = mtstate->resultRelInfo;
4851 9618 : foreach(l, returningLists)
4852 : {
4853 4982 : List *rlist = (List *) lfirst(l);
4854 :
4855 4982 : resultRelInfo->ri_returningList = rlist;
4856 4982 : resultRelInfo->ri_projectReturning =
4857 4982 : ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
4858 4982 : resultRelInfo->ri_RelationDesc->rd_att);
4859 4982 : resultRelInfo++;
4860 : }
4861 : }
4862 : else
4863 : {
4864 : /*
4865 : * We still must construct a dummy result tuple type, because InitPlan
4866 : * expects one (maybe should change that?).
4867 : */
4868 116406 : mtstate->ps.plan->targetlist = NIL;
4869 116406 : ExecInitResultTypeTL(&mtstate->ps);
4870 :
4871 116406 : mtstate->ps.ps_ExprContext = NULL;
4872 : }
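       :
       :     /*
       :      * Illustrative example (not in the original source): per-relation
       :      * RETURNING projections matter for inheritance and partitioning,
       :      * e.g.
       :      *
       :      *      UPDATE parent SET x = x + 1 RETURNING *;
       :      *
       :      * (table "parent" hypothetical), where each child's tuples,
       :      * possibly with differing column layouts, are projected into the
       :      * single output rowtype chosen above from the first RETURNING list.
       :      */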
4873 :
4874 : /* Set the list of arbiter indexes if needed for ON CONFLICT */
4875 121042 : resultRelInfo = mtstate->resultRelInfo;
4876 121042 : if (node->onConflictAction != ONCONFLICT_NONE)
4877 : {
4878 : /* insert may only have one relation, inheritance is not expanded */
4879 : Assert(total_nrels == 1);
4880 1356 : resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
4881 : }
4882 :
4883 : /*
4884 : * If needed, initialize the target list, projection and qual for ON
4885 : * CONFLICT DO UPDATE.
4886 : */
4887 121042 : if (node->onConflictAction == ONCONFLICT_UPDATE)
4888 : {
4889 912 : OnConflictSetState *onconfl = makeNode(OnConflictSetState);
4890 : ExprContext *econtext;
4891 : TupleDesc relationDesc;
4892 :
4893 : /* already exists if created by RETURNING processing above */
4894 912 : if (mtstate->ps.ps_ExprContext == NULL)
4895 632 : ExecAssignExprContext(estate, &mtstate->ps);
4896 :
4897 912 : econtext = mtstate->ps.ps_ExprContext;
4898 912 : relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
4899 :
4900 : /* create state for DO UPDATE SET operation */
4901 912 : resultRelInfo->ri_onConflict = onconfl;
4902 :
4903 : /* initialize slot for the existing tuple */
4904 912 : onconfl->oc_Existing =
4905 912 : table_slot_create(resultRelInfo->ri_RelationDesc,
4906 912 : &mtstate->ps.state->es_tupleTable);
4907 :
4908 : /*
4909 : * Create the tuple slot for the UPDATE SET projection. We want a slot
4910 : * of the table's type here, because the slot will be used to insert
4911 : * into the table, and for RETURNING processing - which may access
4912 : * system attributes.
4913 : */
4914 912 : onconfl->oc_ProjSlot =
4915 912 : table_slot_create(resultRelInfo->ri_RelationDesc,
4916 912 : &mtstate->ps.state->es_tupleTable);
4917 :
4918 : /* build UPDATE SET projection state */
4919 912 : onconfl->oc_ProjInfo =
4920 912 : ExecBuildUpdateProjection(node->onConflictSet,
4921 : true,
4922 : node->onConflictCols,
4923 : relationDesc,
4924 : econtext,
4925 : onconfl->oc_ProjSlot,
4926 : &mtstate->ps);
4927 :
4928 : /* initialize state to evaluate the WHERE clause, if any */
4929 912 : if (node->onConflictWhere)
4930 : {
4931 : ExprState *qualexpr;
4932 :
4933 176 : qualexpr = ExecInitQual((List *) node->onConflictWhere,
4934 : &mtstate->ps);
4935 176 : onconfl->oc_WhereClause = qualexpr;
4936 : }
4937 : }
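       :
       :     /*
       :      * Illustrative example (not in the original source): the state
       :      * built above serves statements of the form
       :      *
       :      *      INSERT INTO t VALUES (1, 'x')
       :      *          ON CONFLICT (id) DO UPDATE SET val = EXCLUDED.val
       :      *          WHERE t.val IS DISTINCT FROM EXCLUDED.val;
       :      *
       :      * (table "t" hypothetical): oc_Existing receives the conflicting
       :      * row, oc_ProjSlot/oc_ProjInfo compute the SET result, and
       :      * oc_WhereClause evaluates the optional WHERE clause.
       :      */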
4938 :
4939 : /*
4940 : * If we have any secondary relations in an UPDATE or DELETE, they need to
4941 : * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
4942 : * EvalPlanQual mechanism needs to be told about them. This also goes for
4943 : * the source relations in a MERGE. Locate the relevant ExecRowMarks.
4944 : */
4945 121042 : arowmarks = NIL;
4946 123852 : foreach(l, node->rowMarks)
4947 : {
4948 2810 : PlanRowMark *rc = lfirst_node(PlanRowMark, l);
4949 : ExecRowMark *erm;
4950 : ExecAuxRowMark *aerm;
4951 :
4952 : /*
4953 : * Ignore "parent" rowmarks, because they are irrelevant at runtime.
4954 : * Also ignore the rowmarks belonging to child tables that have been
4955 : * pruned in ExecDoInitialPruning().
4956 : */
4957 2810 : if (rc->isParent ||
4958 2668 : !bms_is_member(rc->rti, estate->es_unpruned_relids))
4959 596 : continue;
4960 :
4961 : /* Find ExecRowMark and build ExecAuxRowMark */
4962 2214 : erm = ExecFindRowMark(estate, rc->rti, false);
4963 2214 : aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
4964 2214 : arowmarks = lappend(arowmarks, aerm);
4965 : }
4966 :
4967 : /* For a MERGE command, initialize its state */
4968 121042 : if (mtstate->operation == CMD_MERGE)
4969 1504 : ExecInitMerge(mtstate, estate);
4970 :
4971 121042 : EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
4972 :
4973 : /*
4974 : * If there are a lot of result relations, use a hash table to speed the
4975 : * lookups. If there are not a lot, a simple linear search is faster.
4976 : *
4977 : * It's not clear where the threshold is, but try 64 for starters. In a
4978 : * debugging build, use a small threshold so that we get some test
4979 : * coverage of both code paths.
4980 : */
4981 : #ifdef USE_ASSERT_CHECKING
4982 : #define MT_NRELS_HASH 4
4983 : #else
4984 : #define MT_NRELS_HASH 64
4985 : #endif
4986 121042 : if (nrels >= MT_NRELS_HASH)
4987 : {
4988 : HASHCTL hash_ctl;
4989 :
4990 0 : hash_ctl.keysize = sizeof(Oid);
4991 0 : hash_ctl.entrysize = sizeof(MTTargetRelLookup);
4992 0 : hash_ctl.hcxt = CurrentMemoryContext;
4993 0 : mtstate->mt_resultOidHash =
4994 0 : hash_create("ModifyTable target hash",
4995 : nrels, &hash_ctl,
4996 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
4997 0 : for (i = 0; i < nrels; i++)
4998 : {
4999 : Oid hashkey;
5000 : MTTargetRelLookup *mtlookup;
5001 : bool found;
5002 :
5003 0 : resultRelInfo = &mtstate->resultRelInfo[i];
5004 0 : hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
5005 : mtlookup = (MTTargetRelLookup *)
5006 0 : hash_search(mtstate->mt_resultOidHash, &hashkey,
5007 : HASH_ENTER, &found);
5008 : Assert(!found);
5009 0 : mtlookup->relationIndex = i;
5010 : }
5011 : }
5012 : else
5013 121042 : mtstate->mt_resultOidHash = NULL;
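       :
       :     /*
       :      * Rough sketch (not in the original source) of the lookup side, as
       :      * performed in ExecLookupResultRelByOid():
       :      *
       :      *      if (node->mt_resultOidHash)
       :      *          mtlookup = hash_search(node->mt_resultOidHash, &resultoid,
       :      *                                 HASH_FIND, NULL);
       :      *      else
       :      *          ... linear scan of node->resultRelInfo[] ...
       :      */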
5014 :
5015 : /*
5016 : * Determine if the FDW supports batch insert and determine the batch size
5017 : * (an FDW may support batching, but it may be disabled for the
5018 : * server/table).
5019 : *
5020 : * We only do this for INSERT, so that for UPDATE/DELETE the batch size
5021 : * remains set to 0.
5022 : */
5023 121042 : if (operation == CMD_INSERT)
5024 : {
5025 : /* insert may only have one relation, inheritance is not expanded */
5026 : Assert(total_nrels == 1);
5027 93392 : resultRelInfo = mtstate->resultRelInfo;
5028 93392 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5029 93392 : resultRelInfo->ri_FdwRoutine != NULL &&
5030 174 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
5031 174 : resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
5032 : {
5033 174 : resultRelInfo->ri_BatchSize =
5034 174 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
5035 174 : Assert(resultRelInfo->ri_BatchSize >= 1);
5036 : }
5037 : else
5038 93218 : resultRelInfo->ri_BatchSize = 1;
5039 : }
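       :
       :     /*
       :      * Illustrative note (not in the original source): postgres_fdw,
       :      * for example, implements both callbacks and takes the batch size
       :      * from its "batch_size" server/table option, e.g.
       :      *
       :      *      ALTER SERVER loopback OPTIONS (ADD batch_size '128');
       :      *
       :      * (server "loopback" hypothetical).
       :      */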
5040 :
5041 : /*
5042 : * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
5043 : * to estate->es_auxmodifytables so that it will be run to completion by
5044 : * ExecPostprocessPlan. (It'd actually work fine to add the primary
5045 : * ModifyTable node too, but there's no need.) Note the use of lcons not
5046 : * lappend: we need later-initialized ModifyTable nodes to be shut down
5047 : * before earlier ones. This ensures that we don't throw away RETURNING
5048 : * rows that need to be seen by a later CTE subplan.
5049 : */
5050 121042 : if (!mtstate->canSetTag)
5051 942 : estate->es_auxmodifytables = lcons(mtstate,
5052 : estate->es_auxmodifytables);
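       :
       :     /*
       :      * Illustrative example (not in the original source): auxiliary
       :      * ModifyTable nodes typically come from data-modifying CTEs, e.g.
       :      *
       :      *      WITH moved AS (DELETE FROM src RETURNING *)
       :      *      INSERT INTO dst SELECT * FROM moved;
       :      *
       :      * (tables hypothetical).  The DELETE's node is not canSetTag, so
       :      * it is added here and later run to completion by
       :      * ExecPostprocessPlan.
       :      */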
5053 :
5054 121042 : return mtstate;
5055 : }
5056 :
5057 : /* ----------------------------------------------------------------
5058 : * ExecEndModifyTable
5059 : *
5060 : * Shuts down the plan.
5061 : *
5062 : * Returns nothing of interest.
5063 : * ----------------------------------------------------------------
5064 : */
5065 : void
5066 116726 : ExecEndModifyTable(ModifyTableState *node)
5067 : {
5068 : int i;
5069 :
5070 : /*
5071 : * Allow any FDWs to shut down
5072 : */
5073 235520 : for (i = 0; i < node->mt_nrels; i++)
5074 : {
5075 : int j;
5076 118794 : ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
5077 :
5078 118794 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5079 118602 : resultRelInfo->ri_FdwRoutine != NULL &&
5080 302 : resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
5081 302 : resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
5082 : resultRelInfo);
5083 :
5084 : /*
5085 : * Clean up the initialized batch slots. This only matters for FDWs
5086 : * with batching, but the other cases will have ri_NumSlotsInitialized
5087 : * == 0.
5088 : */
5089 118850 : for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
5090 : {
5091 56 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
5092 56 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
5093 : }
5094 : }
5095 :
5096 : /*
5097 : * Close all the partitioned tables, leaf partitions, and their indices
5098 : * and release the slot used for tuple routing, if set.
5099 : */
5100 116726 : if (node->mt_partition_tuple_routing)
5101 : {
5102 5946 : ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
5103 :
5104 5946 : if (node->mt_root_tuple_slot)
5105 644 : ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
5106 : }
5107 :
5108 : /*
5109 : * Terminate EPQ execution if active
5110 : */
5111 116726 : EvalPlanQualEnd(&node->mt_epqstate);
5112 :
5113 : /*
5114 : * shut down subplan
5115 : */
5116 116726 : ExecEndNode(outerPlanState(node));
5117 116726 : }
5118 :
5119 : void
5120 0 : ExecReScanModifyTable(ModifyTableState *node)
5121 : {
5122 : /*
5123 : * Currently, we don't need to support rescan on ModifyTable nodes. The
5124 : * semantics of that would be a bit debatable anyway.
5125 : */
5126 0 : elog(ERROR, "ExecReScanModifyTable is not implemented");
5127 : }
|