Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeModifyTable.c
4 : * routines to handle ModifyTable nodes.
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/executor/nodeModifyTable.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : /* INTERFACE ROUTINES
16 : * ExecInitModifyTable - initialize the ModifyTable node
17 : * ExecModifyTable - retrieve the next tuple from the node
18 : * ExecEndModifyTable - shut down the ModifyTable node
19 : * ExecReScanModifyTable - rescan the ModifyTable node
20 : *
21 : * NOTES
22 : * The ModifyTable node receives input from its outerPlan, which is
23 : * the data to insert for INSERT cases, the changed columns' new
24 : * values plus row-locating info for UPDATE and MERGE cases, or just the
25 : * row-locating info for DELETE cases.
26 : *
27 : * The relation to modify can be an ordinary table, a foreign table, or a
28 : * view. If it's a view, either it has sufficient INSTEAD OF triggers or
29 : * this node executes only MERGE ... DO NOTHING. If the original MERGE
30 : * targeted a view not in one of those two categories, earlier processing
31 : * already pointed the ModifyTable result relation to an underlying
32 : * relation of that other view. This node does process
33 : * ri_WithCheckOptions, which may have expressions from those other,
34 : * automatically updatable views.
35 : *
36 : * MERGE runs a join between the source relation and the target table.
37 : * If any WHEN NOT MATCHED [BY TARGET] clauses are present, then the join
38 : * is an outer join that might output tuples without a matching target
39 : * tuple. In this case, any unmatched target tuples will have NULL
40 : * row-locating info, and only INSERT can be run. But for matched target
41 : * tuples, the row-locating info is used to determine the tuple to UPDATE
42 : * or DELETE. When all clauses are WHEN MATCHED or WHEN NOT MATCHED BY
43 : * SOURCE, all tuples produced by the join will include a matching target
44 : * tuple, so all tuples contain row-locating info.
45 : *
46 : * If the query specifies RETURNING, then the ModifyTable returns a
47 : * RETURNING tuple after completing each row insert, update, or delete.
48 : * It must be called again to continue the operation. Without RETURNING,
49 : * we just loop within the node until all the work is done, then
50 : * return NULL. This avoids useless call/return overhead.
51 : */
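 : /*
 : * Illustrative example (editorial sketch, not part of the original file):
 : * for hypothetical tables "target" and "source", a statement such as
 : *
 : *     MERGE INTO target t
 : *     USING source s ON t.id = s.id
 : *     WHEN MATCHED THEN UPDATE SET val = s.val
 : *     WHEN NOT MATCHED THEN INSERT (id, val) VALUES (s.id, s.val)
 : *     RETURNING merge_action(), t.*;
 : *
 : * drives this node with an outer join: source rows lacking a target match
 : * arrive with NULL row-locating junk columns and can only INSERT, while
 : * matched rows carry a valid TID for UPDATE or DELETE.  Because RETURNING
 : * is present, each modified row is returned to the caller one at a time
 : * rather than being processed in the internal loop described above.
 : */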
52 :
53 : #include "postgres.h"
54 :
55 : #include "access/htup_details.h"
56 : #include "access/tableam.h"
57 : #include "access/xact.h"
58 : #include "commands/trigger.h"
59 : #include "executor/execPartition.h"
60 : #include "executor/executor.h"
61 : #include "executor/nodeModifyTable.h"
62 : #include "foreign/fdwapi.h"
63 : #include "miscadmin.h"
64 : #include "nodes/nodeFuncs.h"
65 : #include "optimizer/optimizer.h"
66 : #include "rewrite/rewriteHandler.h"
67 : #include "storage/lmgr.h"
68 : #include "utils/builtins.h"
69 : #include "utils/datum.h"
70 : #include "utils/rel.h"
71 : #include "utils/snapmgr.h"
72 :
73 :
74 : typedef struct MTTargetRelLookup
75 : {
76 : Oid relationOid; /* hash key, must be first */
77 : int relationIndex; /* rel's index in resultRelInfo[] array */
78 : } MTTargetRelLookup;
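 : /*
 : * Editorial sketch (not part of the original file): ModifyTableState keeps
 : * a dynahash of these entries (mt_resultOidHash) so tuple routing can map
 : * a result relation's OID to its index in resultRelInfo[].  Assuming such
 : * a hash table has been built (and utils/hsearch.h is in scope), a lookup
 : * might look like the hypothetical helper below; the function name is
 : * invented for illustration.
 : */
 : static inline int
 : MTLookupTargetRelIndex(HTAB *mt_resultOidHash, Oid reloid)
 : {
 : 	MTTargetRelLookup *entry;
 : 	bool		found;
 :
 : 	/* relationOid is the hash key, so it must be first in the struct */
 : 	entry = (MTTargetRelLookup *) hash_search(mt_resultOidHash, &reloid,
 : 											  HASH_FIND, &found);
 : 	return found ? entry->relationIndex : -1;
 : }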
79 :
80 : /*
81 : * Context struct for a ModifyTable operation, containing basic execution
82 : * state and some output variables populated by ExecUpdateAct() and
83 : * ExecDeleteAct() to report the result of their actions to callers.
84 : */
85 : typedef struct ModifyTableContext
86 : {
87 : /* Operation state */
88 : ModifyTableState *mtstate;
89 : EPQState *epqstate;
90 : EState *estate;
91 :
92 : /*
93 : * Slot containing tuple obtained from ModifyTable's subplan. Used to
94 : * access "junk" columns that are not going to be stored.
95 : */
96 : TupleTableSlot *planSlot;
97 :
98 : /*
99 : * Information about the changes that were made concurrently to a tuple
100 : * being updated or deleted
101 : */
102 : TM_FailureData tmfd;
103 :
104 : /*
105 : * The tuple deleted when doing a cross-partition UPDATE with a RETURNING
106 : * clause that refers to OLD columns (converted to the root's tuple
107 : * descriptor).
108 : */
109 : TupleTableSlot *cpDeletedSlot;
110 :
111 : /*
112 : * The tuple projected by the INSERT's RETURNING clause, when doing a
113 : * cross-partition UPDATE
114 : */
115 : TupleTableSlot *cpUpdateReturningSlot;
116 : } ModifyTableContext;
117 :
118 : /*
119 : * Context struct containing output data specific to UPDATE operations.
120 : */
121 : typedef struct UpdateContext
122 : {
123 : bool crossPartUpdate; /* was it a cross-partition update? */
124 : TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
125 :
126 : /*
127 : * Lock mode to acquire on the latest tuple version before performing
128 : * EvalPlanQual on it
129 : */
130 : LockTupleMode lockmode;
131 : } UpdateContext;
132 :
133 :
134 : static void ExecBatchInsert(ModifyTableState *mtstate,
135 : ResultRelInfo *resultRelInfo,
136 : TupleTableSlot **slots,
137 : TupleTableSlot **planSlots,
138 : int numSlots,
139 : EState *estate,
140 : bool canSetTag);
141 : static void ExecPendingInserts(EState *estate);
142 : static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
143 : ResultRelInfo *sourcePartInfo,
144 : ResultRelInfo *destPartInfo,
145 : ItemPointer tupleid,
146 : TupleTableSlot *oldslot,
147 : TupleTableSlot *newslot);
148 : static bool ExecOnConflictUpdate(ModifyTableContext *context,
149 : ResultRelInfo *resultRelInfo,
150 : ItemPointer conflictTid,
151 : TupleTableSlot *excludedSlot,
152 : bool canSetTag,
153 : TupleTableSlot **returning);
154 : static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
155 : EState *estate,
156 : PartitionTupleRouting *proute,
157 : ResultRelInfo *targetRelInfo,
158 : TupleTableSlot *slot,
159 : ResultRelInfo **partRelInfo);
160 :
161 : static TupleTableSlot *ExecMerge(ModifyTableContext *context,
162 : ResultRelInfo *resultRelInfo,
163 : ItemPointer tupleid,
164 : HeapTuple oldtuple,
165 : bool canSetTag);
166 : static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
167 : static TupleTableSlot *ExecMergeMatched(ModifyTableContext *context,
168 : ResultRelInfo *resultRelInfo,
169 : ItemPointer tupleid,
170 : HeapTuple oldtuple,
171 : bool canSetTag,
172 : bool *matched);
173 : static TupleTableSlot *ExecMergeNotMatched(ModifyTableContext *context,
174 : ResultRelInfo *resultRelInfo,
175 : bool canSetTag);
176 :
177 :
178 : /*
179 : * Verify that the tuples to be produced by INSERT match the
180 : * target relation's rowtype
181 : *
182 : * We do this to guard against stale plans. If plan invalidation is
183 : * functioning properly then we should never get a failure here, but better
184 : * safe than sorry. Note that this is called after we have obtained lock
185 : * on the target rel, so the rowtype can't change underneath us.
186 : *
187 : * The plan output is represented by its targetlist, because that makes
188 : * handling the dropped-column case easier.
189 : *
190 : * We used to use this for UPDATE as well, but now the equivalent checks
191 : * are done in ExecBuildUpdateProjection.
192 : */
193 : static void
194 92504 : ExecCheckPlanOutput(Relation resultRel, List *targetList)
195 : {
196 92504 : TupleDesc resultDesc = RelationGetDescr(resultRel);
197 92504 : int attno = 0;
198 : ListCell *lc;
199 :
200 283442 : foreach(lc, targetList)
201 : {
202 190938 : TargetEntry *tle = (TargetEntry *) lfirst(lc);
203 : Form_pg_attribute attr;
204 :
205 : Assert(!tle->resjunk); /* caller removed junk items already */
206 :
207 190938 : if (attno >= resultDesc->natts)
208 0 : ereport(ERROR,
209 : (errcode(ERRCODE_DATATYPE_MISMATCH),
210 : errmsg("table row type and query-specified row type do not match"),
211 : errdetail("Query has too many columns.")));
212 190938 : attr = TupleDescAttr(resultDesc, attno);
213 190938 : attno++;
214 :
215 190938 : if (!attr->attisdropped)
216 : {
217 : /* Normal case: demand type match */
218 190316 : if (exprType((Node *) tle->expr) != attr->atttypid)
219 0 : ereport(ERROR,
220 : (errcode(ERRCODE_DATATYPE_MISMATCH),
221 : errmsg("table row type and query-specified row type do not match"),
222 : errdetail("Table has type %s at ordinal position %d, but query expects %s.",
223 : format_type_be(attr->atttypid),
224 : attno,
225 : format_type_be(exprType((Node *) tle->expr)))));
226 : }
227 : else
228 : {
229 : /*
230 : * For a dropped column, we can't check atttypid (it's likely 0).
231 : * In any case the planner has most likely inserted an INT4 null.
232 : * What we insist on is just *some* NULL constant.
233 : */
234 622 : if (!IsA(tle->expr, Const) ||
235 622 : !((Const *) tle->expr)->constisnull)
236 0 : ereport(ERROR,
237 : (errcode(ERRCODE_DATATYPE_MISMATCH),
238 : errmsg("table row type and query-specified row type do not match"),
239 : errdetail("Query provides a value for a dropped column at ordinal position %d.",
240 : attno)));
241 : }
242 : }
243 92504 : if (attno != resultDesc->natts)
244 0 : ereport(ERROR,
245 : (errcode(ERRCODE_DATATYPE_MISMATCH),
246 : errmsg("table row type and query-specified row type do not match"),
247 : errdetail("Query has too few columns.")));
248 92504 : }
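 : /*
 : * Editorial example of the dropped-column case above: after
 : *
 : *     CREATE TABLE t (a int, b int, c int);
 : *     ALTER TABLE t DROP COLUMN b;
 : *
 : * the table still has three physical attributes, with attisdropped set on
 : * the second, and a plan for "INSERT INTO t VALUES (1, 2)" carries a NULL
 : * Const in the dropped position.  The check above only insists that the
 : * placeholder really is some NULL constant, since atttypid is unreliable
 : * for dropped columns.
 : */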
249 :
250 : /*
251 : * ExecProcessReturning --- evaluate a RETURNING list
252 : *
253 : * context: context for the ModifyTable operation
254 : * resultRelInfo: current result rel
255 : * cmdType: operation/merge action performed (INSERT, UPDATE, or DELETE)
256 : * oldSlot: slot holding old tuple deleted or updated
257 : * newSlot: slot holding new tuple inserted or updated
258 : * planSlot: slot holding tuple returned by top subplan node
259 : *
260 : * Note: If oldSlot and newSlot are NULL, the FDW should have already provided
261 : * econtext's scan tuple and its old & new tuples are not needed (FDW direct-
262 : * modify is disabled if the RETURNING list refers to any OLD/NEW values).
263 : *
264 : * Returns a slot holding the result tuple
265 : */
266 : static TupleTableSlot *
267 8004 : ExecProcessReturning(ModifyTableContext *context,
268 : ResultRelInfo *resultRelInfo,
269 : CmdType cmdType,
270 : TupleTableSlot *oldSlot,
271 : TupleTableSlot *newSlot,
272 : TupleTableSlot *planSlot)
273 : {
274 8004 : EState *estate = context->estate;
275 8004 : ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
276 8004 : ExprContext *econtext = projectReturning->pi_exprContext;
277 :
278 : /* Make tuple and any needed join variables available to ExecProject */
279 8004 : switch (cmdType)
280 : {
281 6592 : case CMD_INSERT:
282 : case CMD_UPDATE:
283 : /* return new tuple by default */
284 6592 : if (newSlot)
285 6136 : econtext->ecxt_scantuple = newSlot;
286 6592 : break;
287 :
288 1412 : case CMD_DELETE:
289 : /* return old tuple by default */
290 1412 : if (oldSlot)
291 1174 : econtext->ecxt_scantuple = oldSlot;
292 1412 : break;
293 :
294 0 : default:
295 0 : elog(ERROR, "unrecognized commandType: %d", (int) cmdType);
296 : }
297 8004 : econtext->ecxt_outertuple = planSlot;
298 :
299 : /* Make old/new tuples available to ExecProject, if required */
300 8004 : if (oldSlot)
301 3774 : econtext->ecxt_oldtuple = oldSlot;
302 4230 : else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
303 180 : econtext->ecxt_oldtuple = ExecGetAllNullSlot(estate, resultRelInfo);
304 : else
305 4050 : econtext->ecxt_oldtuple = NULL; /* No references to OLD columns */
306 :
307 8004 : if (newSlot)
308 6136 : econtext->ecxt_newtuple = newSlot;
309 1868 : else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW)
310 132 : econtext->ecxt_newtuple = ExecGetAllNullSlot(estate, resultRelInfo);
311 : else
312 1736 : econtext->ecxt_newtuple = NULL; /* No references to NEW columns */
313 :
314 : /*
315 : * Tell ExecProject whether or not the OLD/NEW rows actually exist. This
316 : * information is required to evaluate ReturningExpr nodes and also in
317 : * ExecEvalSysVar() and ExecEvalWholeRowVar().
318 : */
319 8004 : if (oldSlot == NULL)
320 4230 : projectReturning->pi_state.flags |= EEO_FLAG_OLD_IS_NULL;
321 : else
322 3774 : projectReturning->pi_state.flags &= ~EEO_FLAG_OLD_IS_NULL;
323 :
324 8004 : if (newSlot == NULL)
325 1868 : projectReturning->pi_state.flags |= EEO_FLAG_NEW_IS_NULL;
326 : else
327 6136 : projectReturning->pi_state.flags &= ~EEO_FLAG_NEW_IS_NULL;
328 :
329 : /* Compute the RETURNING expressions */
330 8004 : return ExecProject(projectReturning);
331 : }
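 : /*
 : * Editorial example of the OLD/NEW handling above, using a hypothetical
 : * table:
 : *
 : *     UPDATE accounts SET balance = balance + 100
 : *       RETURNING old.balance AS before, new.balance AS after;
 : *
 : * For a plain INSERT there is no old row, so EEO_FLAG_OLD_IS_NULL is set
 : * and any old.* references evaluate to NULL (via the all-null slot
 : * installed above); a plain DELETE is the mirror image for new.*.
 : */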
332 :
333 : /*
334 : * ExecCheckTupleVisible -- verify tuple is visible
335 : *
336 : * It would not be consistent with guarantees of the higher isolation levels to
337 : * proceed with avoiding insertion (taking speculative insertion's alternative
338 : * path) on the basis of another tuple that is not visible to the MVCC snapshot.
339 : * Check for the need to raise a serialization failure, and do so as necessary.
340 : */
341 : static void
342 5246 : ExecCheckTupleVisible(EState *estate,
343 : Relation rel,
344 : TupleTableSlot *slot)
345 : {
346 5246 : if (!IsolationUsesXactSnapshot())
347 5182 : return;
348 :
349 64 : if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
350 : {
351 : Datum xminDatum;
352 : TransactionId xmin;
353 : bool isnull;
354 :
355 40 : xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
356 : Assert(!isnull);
357 40 : xmin = DatumGetTransactionId(xminDatum);
358 :
359 : /*
360 : * We should not raise a serialization failure if the conflict is
361 : * against a tuple inserted by our own transaction, even if it's not
362 : * visible to our snapshot. (This would happen, for example, if
363 : * conflicting keys are proposed for insertion in a single command.)
364 : */
365 40 : if (!TransactionIdIsCurrentTransactionId(xmin))
366 20 : ereport(ERROR,
367 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
368 : errmsg("could not serialize access due to concurrent update")));
369 : }
370 : }
371 :
372 : /*
373 : * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
374 : */
375 : static void
376 212 : ExecCheckTIDVisible(EState *estate,
377 : ResultRelInfo *relinfo,
378 : ItemPointer tid,
379 : TupleTableSlot *tempSlot)
380 : {
381 212 : Relation rel = relinfo->ri_RelationDesc;
382 :
383 : /* Redundantly check isolation level */
384 212 : if (!IsolationUsesXactSnapshot())
385 148 : return;
386 :
387 64 : if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
388 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
389 64 : ExecCheckTupleVisible(estate, rel, tempSlot);
390 44 : ExecClearTuple(tempSlot);
391 : }
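 : /*
 : * Editorial illustration of the checks above: at REPEATABLE READ or
 : * SERIALIZABLE, if a concurrent transaction committed the conflicting row
 : * after our snapshot was taken, then
 : *
 : *     INSERT INTO t VALUES (...) ON CONFLICT DO NOTHING;
 : *
 : * must not silently skip the insert on the basis of a row our snapshot
 : * cannot see, so ExecCheckTupleVisible() raises a serialization failure
 : * instead (unless the conflicting row came from our own transaction).
 : */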
392 :
393 : /*
394 : * Initialize generated columns handling for a tuple
395 : *
396 : * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI or
397 : * ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
398 : * This is used only for stored generated columns.
399 : *
400 : * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
401 : * This is used by both stored and virtual generated columns.
402 : *
403 : * Note: usually, a given query would need only one of ri_GeneratedExprsI and
404 : * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
405 : * cross-partition UPDATEs, since a partition might be the target of both
406 : * UPDATE and INSERT actions.
407 : */
408 : void
409 59130 : ExecInitGenerated(ResultRelInfo *resultRelInfo,
410 : EState *estate,
411 : CmdType cmdtype)
412 : {
413 59130 : Relation rel = resultRelInfo->ri_RelationDesc;
414 59130 : TupleDesc tupdesc = RelationGetDescr(rel);
415 59130 : int natts = tupdesc->natts;
416 : ExprState **ri_GeneratedExprs;
417 : int ri_NumGeneratedNeeded;
418 : Bitmapset *updatedCols;
419 : MemoryContext oldContext;
420 :
421 : /* Nothing to do if no generated columns */
422 59130 : if (!(tupdesc->constr && (tupdesc->constr->has_generated_stored || tupdesc->constr->has_generated_virtual)))
423 58114 : return;
424 :
425 : /*
426 : * In an UPDATE, we can skip computing any generated columns that do not
427 : * depend on any UPDATE target column. But if there is a BEFORE ROW
428 : * UPDATE trigger, we cannot skip because the trigger might change more
429 : * columns.
430 : */
431 1016 : if (cmdtype == CMD_UPDATE &&
432 250 : !(rel->trigdesc && rel->trigdesc->trig_update_before_row))
433 206 : updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
434 : else
435 810 : updatedCols = NULL;
436 :
437 : /*
438 : * Make sure these data structures are built in the per-query memory
439 : * context so they'll survive throughout the query.
440 : */
441 1016 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
442 :
443 1016 : ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
444 1016 : ri_NumGeneratedNeeded = 0;
445 :
446 3940 : for (int i = 0; i < natts; i++)
447 : {
448 2930 : char attgenerated = TupleDescAttr(tupdesc, i)->attgenerated;
449 :
450 2930 : if (attgenerated)
451 : {
452 : Expr *expr;
453 :
454 : /* Fetch the GENERATED AS expression tree */
455 1100 : expr = (Expr *) build_column_default(rel, i + 1);
456 1100 : if (expr == NULL)
457 0 : elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
458 : i + 1, RelationGetRelationName(rel));
459 :
460 : /*
461 : * If it's an update with a known set of update target columns,
462 : * see if we can skip the computation.
463 : */
464 1100 : if (updatedCols)
465 : {
466 220 : Bitmapset *attrs_used = NULL;
467 :
468 220 : pull_varattnos((Node *) expr, 1, &attrs_used);
469 :
470 220 : if (!bms_overlap(updatedCols, attrs_used))
471 24 : continue; /* need not update this column */
472 : }
473 :
474 : /* No luck, so prepare the expression for execution */
475 1076 : if (attgenerated == ATTRIBUTE_GENERATED_STORED)
476 : {
477 992 : ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
478 986 : ri_NumGeneratedNeeded++;
479 : }
480 :
481 : /* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
482 1070 : if (cmdtype == CMD_UPDATE)
483 248 : resultRelInfo->ri_extraUpdatedCols =
484 248 : bms_add_member(resultRelInfo->ri_extraUpdatedCols,
485 : i + 1 - FirstLowInvalidHeapAttributeNumber);
486 : }
487 : }
488 :
489 1010 : if (ri_NumGeneratedNeeded == 0)
490 : {
491 : /* didn't need it after all */
492 42 : pfree(ri_GeneratedExprs);
493 42 : ri_GeneratedExprs = NULL;
494 : }
495 :
496 : /* Save in appropriate set of fields */
497 1010 : if (cmdtype == CMD_UPDATE)
498 : {
499 : /* Don't call twice */
500 : Assert(resultRelInfo->ri_GeneratedExprsU == NULL);
501 :
502 250 : resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
503 250 : resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;
504 :
505 250 : resultRelInfo->ri_extraUpdatedCols_valid = true;
506 : }
507 : else
508 : {
509 : /* Don't call twice */
510 : Assert(resultRelInfo->ri_GeneratedExprsI == NULL);
511 :
512 760 : resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
513 760 : resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
514 : }
515 :
516 1010 : MemoryContextSwitchTo(oldContext);
517 : }
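 : /*
 : * Editorial example of the skip logic above, with a hypothetical table:
 : *
 : *     CREATE TABLE t (a int, b int,
 : *                     a2 int GENERATED ALWAYS AS (a * 2) STORED);
 : *
 : * For "UPDATE t SET b = 0", a2 depends on no updated column, so its
 : * expression is never compiled and ri_NumGeneratedNeededU stays 0
 : * (assuming no BEFORE ROW UPDATE trigger); "UPDATE t SET a = 1" would
 : * compile the expression here and evaluate it later in
 : * ExecComputeStoredGenerated().
 : */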
518 :
519 : /*
520 : * Compute stored generated columns for a tuple
521 : */
522 : void
523 1358 : ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
524 : EState *estate, TupleTableSlot *slot,
525 : CmdType cmdtype)
526 : {
527 1358 : Relation rel = resultRelInfo->ri_RelationDesc;
528 1358 : TupleDesc tupdesc = RelationGetDescr(rel);
529 1358 : int natts = tupdesc->natts;
530 1358 : ExprContext *econtext = GetPerTupleExprContext(estate);
531 : ExprState **ri_GeneratedExprs;
532 : MemoryContext oldContext;
533 : Datum *values;
534 : bool *nulls;
535 :
536 : /* We should not be called unless this is true */
537 : Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);
538 :
539 : /*
540 : * Initialize the expressions if we didn't already, and check whether we
541 : * can exit early because nothing needs to be computed.
542 : */
543 1358 : if (cmdtype == CMD_UPDATE)
544 : {
545 262 : if (resultRelInfo->ri_GeneratedExprsU == NULL)
546 200 : ExecInitGenerated(resultRelInfo, estate, cmdtype);
547 262 : if (resultRelInfo->ri_NumGeneratedNeededU == 0)
548 18 : return;
549 244 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
550 : }
551 : else
552 : {
553 1096 : if (resultRelInfo->ri_GeneratedExprsI == NULL)
554 766 : ExecInitGenerated(resultRelInfo, estate, cmdtype);
555 : /* Early exit is impossible given the prior Assert */
556 : Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
557 1090 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
558 : }
559 :
560 1334 : oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
561 :
562 1334 : values = palloc(sizeof(*values) * natts);
563 1334 : nulls = palloc(sizeof(*nulls) * natts);
564 :
565 1334 : slot_getallattrs(slot);
566 1334 : memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);
567 :
568 5036 : for (int i = 0; i < natts; i++)
569 : {
570 3720 : CompactAttribute *attr = TupleDescCompactAttr(tupdesc, i);
571 :
572 3720 : if (ri_GeneratedExprs[i])
573 : {
574 : Datum val;
575 : bool isnull;
576 :
577 : Assert(TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED);
578 :
579 1356 : econtext->ecxt_scantuple = slot;
580 :
581 1356 : val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);
582 :
583 : /*
584 : * We must make a copy of val as we have no guarantees about where
585 : * memory for a pass-by-reference Datum is located.
586 : */
587 1338 : if (!isnull)
588 1290 : val = datumCopy(val, attr->attbyval, attr->attlen);
589 :
590 1338 : values[i] = val;
591 1338 : nulls[i] = isnull;
592 : }
593 : else
594 : {
595 2364 : if (!nulls[i])
596 2222 : values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
597 : }
598 : }
599 :
600 1316 : ExecClearTuple(slot);
601 1316 : memcpy(slot->tts_values, values, sizeof(*values) * natts);
602 1316 : memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
603 1316 : ExecStoreVirtualTuple(slot);
604 1316 : ExecMaterializeSlot(slot);
605 :
606 1316 : MemoryContextSwitchTo(oldContext);
607 : }
608 :
609 : /*
610 : * ExecInitInsertProjection
611 : * Do one-time initialization of projection data for INSERT tuples.
612 : *
613 : * INSERT queries may need a projection to filter out junk attrs in the tlist.
614 : *
615 : * This is also a convenient place to verify that the
616 : * output of an INSERT matches the target table.
617 : */
618 : static void
619 91488 : ExecInitInsertProjection(ModifyTableState *mtstate,
620 : ResultRelInfo *resultRelInfo)
621 : {
622 91488 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
623 91488 : Plan *subplan = outerPlan(node);
624 91488 : EState *estate = mtstate->ps.state;
625 91488 : List *insertTargetList = NIL;
626 91488 : bool need_projection = false;
627 : ListCell *l;
628 :
629 : /* Extract non-junk columns of the subplan's result tlist. */
630 279820 : foreach(l, subplan->targetlist)
631 : {
632 188332 : TargetEntry *tle = (TargetEntry *) lfirst(l);
633 :
634 188332 : if (!tle->resjunk)
635 188332 : insertTargetList = lappend(insertTargetList, tle);
636 : else
637 0 : need_projection = true;
638 : }
639 :
640 : /*
641 : * The junk-free list must produce a tuple suitable for the result
642 : * relation.
643 : */
644 91488 : ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);
645 :
646 : /* We'll need a slot matching the table's format. */
647 91488 : resultRelInfo->ri_newTupleSlot =
648 91488 : table_slot_create(resultRelInfo->ri_RelationDesc,
649 : &estate->es_tupleTable);
650 :
651 : /* Build ProjectionInfo if needed (it probably isn't). */
652 91488 : if (need_projection)
653 : {
654 0 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
655 :
656 : /* need an expression context to do the projection */
657 0 : if (mtstate->ps.ps_ExprContext == NULL)
658 0 : ExecAssignExprContext(estate, &mtstate->ps);
659 :
660 0 : resultRelInfo->ri_projectNew =
661 0 : ExecBuildProjectionInfo(insertTargetList,
662 : mtstate->ps.ps_ExprContext,
663 : resultRelInfo->ri_newTupleSlot,
664 : &mtstate->ps,
665 : relDesc);
666 : }
667 :
668 91488 : resultRelInfo->ri_projectNewInfoValid = true;
669 91488 : }
670 :
671 : /*
672 : * ExecInitUpdateProjection
673 : * Do one-time initialization of projection data for UPDATE tuples.
674 : *
675 : * UPDATE always needs a projection, because (1) there are always some junk
676 : * attrs, and (2) we may need to merge values of not-updated columns from
677 : * the old tuple into the final tuple. In UPDATE, the tuple arriving from
678 : * the subplan contains only new values for the changed columns, plus row
679 : * identity info in the junk attrs.
680 : *
681 : * This is "one-time" for any given result rel, but we might touch more than
682 : * one result rel in the course of an inherited UPDATE, and each one needs
683 : * its own projection due to possible column order variation.
684 : *
685 : * This is also a convenient place to verify that the output of an UPDATE
686 : * matches the target table (ExecBuildUpdateProjection does that).
687 : */
688 : static void
689 13078 : ExecInitUpdateProjection(ModifyTableState *mtstate,
690 : ResultRelInfo *resultRelInfo)
691 : {
692 13078 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
693 13078 : Plan *subplan = outerPlan(node);
694 13078 : EState *estate = mtstate->ps.state;
695 13078 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
696 : int whichrel;
697 : List *updateColnos;
698 :
699 : /*
700 : * Usually, mt_lastResultIndex matches the target rel. If it happens not
701 : * to, we can get the index the hard way with an integer division.
702 : */
703 13078 : whichrel = mtstate->mt_lastResultIndex;
704 13078 : if (resultRelInfo != mtstate->resultRelInfo + whichrel)
705 : {
706 0 : whichrel = resultRelInfo - mtstate->resultRelInfo;
707 : Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
708 : }
709 :
710 13078 : updateColnos = (List *) list_nth(mtstate->mt_updateColnosLists, whichrel);
711 :
712 : /*
713 : * For UPDATE, we use the old tuple to fill up missing values in the tuple
714 : * produced by the subplan to get the new tuple. We need two slots, both
715 : * matching the table's desired format.
716 : */
717 13078 : resultRelInfo->ri_oldTupleSlot =
718 13078 : table_slot_create(resultRelInfo->ri_RelationDesc,
719 : &estate->es_tupleTable);
720 13078 : resultRelInfo->ri_newTupleSlot =
721 13078 : table_slot_create(resultRelInfo->ri_RelationDesc,
722 : &estate->es_tupleTable);
723 :
724 : /* need an expression context to do the projection */
725 13078 : if (mtstate->ps.ps_ExprContext == NULL)
726 11702 : ExecAssignExprContext(estate, &mtstate->ps);
727 :
728 13078 : resultRelInfo->ri_projectNew =
729 13078 : ExecBuildUpdateProjection(subplan->targetlist,
730 : false, /* subplan did the evaluation */
731 : updateColnos,
732 : relDesc,
733 : mtstate->ps.ps_ExprContext,
734 : resultRelInfo->ri_newTupleSlot,
735 : &mtstate->ps);
736 :
737 13078 : resultRelInfo->ri_projectNewInfoValid = true;
738 13078 : }
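 : /*
 : * Editorial example: for "UPDATE t SET b = b + 1" on a table t(a, b, c),
 : * the subplan emits only the new value of b plus junk row-identity columns
 : * (such as ctid).  The projection built above fills a and c from
 : * ri_oldTupleSlot and takes b from the subplan output, yielding a complete
 : * new tuple in ri_newTupleSlot.
 : */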
739 :
740 : /*
741 : * ExecGetInsertNewTuple
742 : * This prepares a "new" tuple ready to be inserted into the given result
743 : * relation, by removing any junk columns of the plan's output tuple
744 : * and (if necessary) coercing the tuple to the right tuple format.
745 : */
746 : static TupleTableSlot *
747 11588890 : ExecGetInsertNewTuple(ResultRelInfo *relinfo,
748 : TupleTableSlot *planSlot)
749 : {
750 11588890 : ProjectionInfo *newProj = relinfo->ri_projectNew;
751 : ExprContext *econtext;
752 :
753 : /*
754 : * If there's no projection to be done, just make sure the slot is of the
755 : * right type for the target rel. If the planSlot is the right type we
756 : * can use it as-is, else copy the data into ri_newTupleSlot.
757 : */
758 11588890 : if (newProj == NULL)
759 : {
760 11588890 : if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
761 : {
762 10811856 : ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
763 10811856 : return relinfo->ri_newTupleSlot;
764 : }
765 : else
766 777034 : return planSlot;
767 : }
768 :
769 : /*
770 : * Else project; since the projection output slot is ri_newTupleSlot, this
771 : * will also fix any slot-type problem.
772 : *
773 : * Note: currently, this is dead code, because INSERT cases don't receive
774 : * any junk columns so there's never a projection to be done.
775 : */
776 0 : econtext = newProj->pi_exprContext;
777 0 : econtext->ecxt_outertuple = planSlot;
778 0 : return ExecProject(newProj);
779 : }
780 :
781 : /*
782 : * ExecGetUpdateNewTuple
783 : * This prepares a "new" tuple by combining an UPDATE subplan's output
784 : * tuple (which contains values of changed columns) with unchanged
785 : * columns taken from the old tuple.
786 : *
787 : * The subplan tuple might also contain junk columns, which are ignored.
788 : * Note that the projection also ensures we have a slot of the right type.
789 : */
790 : TupleTableSlot *
791 316182 : ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
792 : TupleTableSlot *planSlot,
793 : TupleTableSlot *oldSlot)
794 : {
795 316182 : ProjectionInfo *newProj = relinfo->ri_projectNew;
796 : ExprContext *econtext;
797 :
798 : /* Use a few extra Asserts to protect against outside callers */
799 : Assert(relinfo->ri_projectNewInfoValid);
800 : Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
801 : Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));
802 :
803 316182 : econtext = newProj->pi_exprContext;
804 316182 : econtext->ecxt_outertuple = planSlot;
805 316182 : econtext->ecxt_scantuple = oldSlot;
806 316182 : return ExecProject(newProj);
807 : }
808 :
809 : /* ----------------------------------------------------------------
810 : * ExecInsert
811 : *
812 : * For INSERT, we have to insert the tuple into the target relation
813 : * (or partition thereof) and insert appropriate tuples into the index
814 : * relations.
815 : *
816 : * slot contains the new tuple value to be stored.
817 : *
818 : * Returns RETURNING result if any, otherwise NULL.
819 : * *inserted_tuple is the tuple that's effectively inserted;
820 : * *insert_destrel is the relation where it was inserted.
821 : * These are only set on success.
822 : *
823 : * This may change the currently active tuple conversion map in
824 : * mtstate->mt_transition_capture, so the callers must take care to
825 : * save the previous value to avoid losing track of it.
826 : * ----------------------------------------------------------------
827 : */
828 : static TupleTableSlot *
829 11591648 : ExecInsert(ModifyTableContext *context,
830 : ResultRelInfo *resultRelInfo,
831 : TupleTableSlot *slot,
832 : bool canSetTag,
833 : TupleTableSlot **inserted_tuple,
834 : ResultRelInfo **insert_destrel)
835 : {
836 11591648 : ModifyTableState *mtstate = context->mtstate;
837 11591648 : EState *estate = context->estate;
838 : Relation resultRelationDesc;
839 11591648 : List *recheckIndexes = NIL;
840 11591648 : TupleTableSlot *planSlot = context->planSlot;
841 11591648 : TupleTableSlot *result = NULL;
842 : TransitionCaptureState *ar_insert_trig_tcs;
843 11591648 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
844 11591648 : OnConflictAction onconflict = node->onConflictAction;
845 11591648 : PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
846 : MemoryContext oldContext;
847 :
848 : /*
849 : * If the input result relation is a partitioned table, find the leaf
850 : * partition to insert the tuple into.
851 : */
852 11591648 : if (proute)
853 : {
854 : ResultRelInfo *partRelInfo;
855 :
856 728270 : slot = ExecPrepareTupleRouting(mtstate, estate, proute,
857 : resultRelInfo, slot,
858 : &partRelInfo);
859 728066 : resultRelInfo = partRelInfo;
860 : }
861 :
862 11591444 : ExecMaterializeSlot(slot);
863 :
864 11591444 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
865 :
866 : /*
867 : * Open the table's indexes, if we have not done so already, so that we
868 : * can add new index entries for the inserted tuple.
869 : */
870 11591444 : if (resultRelationDesc->rd_rel->relhasindex &&
871 3090232 : resultRelInfo->ri_IndexRelationDescs == NULL)
872 31886 : ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);
873 :
874 : /*
875 : * BEFORE ROW INSERT Triggers.
876 : *
877 : * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
878 : * INSERT ... ON CONFLICT statement. We cannot check for constraint
879 : * violations before firing these triggers, because they can change the
880 : * values to insert. Also, they can run arbitrary user-defined code with
881 : * side-effects that we can't cancel by just not inserting the tuple.
882 : */
883 11591444 : if (resultRelInfo->ri_TrigDesc &&
884 75210 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)
885 : {
886 : /* Flush any pending inserts, so rows are visible to the triggers */
887 2150 : if (estate->es_insert_pending_result_relations != NIL)
888 6 : ExecPendingInserts(estate);
889 :
890 2150 : if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
891 200 : return NULL; /* "do nothing" */
892 : }
893 :
894 : /* INSTEAD OF ROW INSERT Triggers */
895 11591128 : if (resultRelInfo->ri_TrigDesc &&
896 74894 : resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
897 : {
898 168 : if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
899 6 : return NULL; /* "do nothing" */
900 : }
901 11590960 : else if (resultRelInfo->ri_FdwRoutine)
902 : {
903 : /*
904 : * GENERATED expressions might reference the tableoid column, so
905 : * (re-)initialize tts_tableOid before evaluating them.
906 : */
907 2014 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
908 :
909 : /*
910 : * Compute stored generated columns
911 : */
912 2014 : if (resultRelationDesc->rd_att->constr &&
913 366 : resultRelationDesc->rd_att->constr->has_generated_stored)
914 8 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
915 : CMD_INSERT);
916 :
917 : /*
918 : * If the FDW supports batching, and batching is requested, accumulate
919 : * rows and insert them in batches. Otherwise use the per-row inserts.
920 : */
921 2014 : if (resultRelInfo->ri_BatchSize > 1)
922 : {
923 288 : bool flushed = false;
924 :
925 : /*
926 : * When we've reached the desired batch size, perform the
927 : * insertion.
928 : */
929 288 : if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
930 : {
931 20 : ExecBatchInsert(mtstate, resultRelInfo,
932 : resultRelInfo->ri_Slots,
933 : resultRelInfo->ri_PlanSlots,
934 : resultRelInfo->ri_NumSlots,
935 : estate, canSetTag);
936 20 : flushed = true;
937 : }
938 :
939 288 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
940 :
941 288 : if (resultRelInfo->ri_Slots == NULL)
942 : {
943 56 : resultRelInfo->ri_Slots = palloc(sizeof(TupleTableSlot *) *
944 28 : resultRelInfo->ri_BatchSize);
945 28 : resultRelInfo->ri_PlanSlots = palloc(sizeof(TupleTableSlot *) *
946 28 : resultRelInfo->ri_BatchSize);
947 : }
948 :
949 : /*
950 : * Initialize the batch slots. We don't know how many slots will
951 : * be needed, so we initialize them as the batch grows, and we
952 : * keep them across batches. To mitigate an inefficiency in how
953 : * resource owner handles objects with many references (as with
954 : * many slots all referencing the same tuple descriptor) we copy
955 : * the appropriate tuple descriptor for each slot.
956 : */
957 288 : if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
958 : {
959 142 : TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
960 : TupleDesc plan_tdesc =
961 142 : CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
962 :
963 284 : resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
964 142 : MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
965 :
966 284 : resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
967 142 : MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
968 :
969 : /* remember how many batch slots we initialized */
970 142 : resultRelInfo->ri_NumSlotsInitialized++;
971 : }
972 :
973 288 : ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
974 : slot);
975 :
976 288 : ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
977 : planSlot);
978 :
979 : /*
980 : * If these are the first tuples stored in the buffers, add the
981 : * target rel and the mtstate to the
982 : * es_insert_pending_result_relations and
983 : * es_insert_pending_modifytables lists respectively, except in
984 : * the case where flushing was done above, in which case they
985 : * would already have been added to the lists, so no need to do
986 : * this.
987 : */
988 288 : if (resultRelInfo->ri_NumSlots == 0 && !flushed)
989 : {
990 : Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
991 : resultRelInfo));
992 36 : estate->es_insert_pending_result_relations =
993 36 : lappend(estate->es_insert_pending_result_relations,
994 : resultRelInfo);
995 36 : estate->es_insert_pending_modifytables =
996 36 : lappend(estate->es_insert_pending_modifytables, mtstate);
997 : }
998 : Assert(list_member_ptr(estate->es_insert_pending_result_relations,
999 : resultRelInfo));
1000 :
1001 288 : resultRelInfo->ri_NumSlots++;
1002 :
1003 288 : MemoryContextSwitchTo(oldContext);
1004 :
1005 288 : return NULL;
1006 : }
1007 :
1008 : /*
1009 : * insert into foreign table: let the FDW do it
1010 : */
1011 1726 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
1012 : resultRelInfo,
1013 : slot,
1014 : planSlot);
1015 :
1016 1720 : if (slot == NULL) /* "do nothing" */
1017 4 : return NULL;
1018 :
1019 : /*
1020 : * AFTER ROW Triggers or RETURNING expressions might reference the
1021 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
1022 : * them. (This covers the case where the FDW replaced the slot.)
1023 : */
1024 1716 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1025 : }
1026 : else
1027 : {
1028 : WCOKind wco_kind;
1029 :
1030 : /*
1031 : * Constraints and GENERATED expressions might reference the tableoid
1032 : * column, so (re-)initialize tts_tableOid before evaluating them.
1033 : */
1034 11588946 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1035 :
1036 : /*
1037 : * Compute stored generated columns
1038 : */
1039 11588946 : if (resultRelationDesc->rd_att->constr &&
1040 3299986 : resultRelationDesc->rd_att->constr->has_generated_stored)
1041 1046 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
1042 : CMD_INSERT);
1043 :
1044 : /*
1045 : * Check any RLS WITH CHECK policies.
1046 : *
1047 : * Normally we should check INSERT policies. But if the insert is the
1048 : * result of a partition key update that moved the tuple to a new
1049 : * partition, we should instead check UPDATE policies, because we are
1050 : * executing policies defined on the target table, and not those
1051 : * defined on the child partitions.
1052 : *
1053 : * If we're running MERGE, we refer to the action that we're executing
1054 : * to know if we're doing an INSERT or UPDATE to a partition table.
1055 : */
1056 11588922 : if (mtstate->operation == CMD_UPDATE)
1057 782 : wco_kind = WCO_RLS_UPDATE_CHECK;
1058 11588140 : else if (mtstate->operation == CMD_MERGE)
1059 1736 : wco_kind = (mtstate->mt_merge_action->mas_action->commandType == CMD_UPDATE) ?
1060 1736 : WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
1061 : else
1062 11586404 : wco_kind = WCO_RLS_INSERT_CHECK;
1063 :
1064 : /*
1065 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
1066 : * we are looking for at this point.
1067 : */
1068 11588922 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1069 582 : ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
1070 :
1071 : /*
1072 : * Check the constraints of the tuple.
1073 : */
1074 11588736 : if (resultRelationDesc->rd_att->constr)
1075 3299866 : ExecConstraints(resultRelInfo, slot, estate);
1076 :
1077 : /*
1078 : * Also check the tuple against the partition constraint, if there is
1079 : * one; except that if we got here via tuple-routing, we don't need to
1080 : * if there's no BR trigger defined on the partition.
1081 : */
1082 11588036 : if (resultRelationDesc->rd_rel->relispartition &&
1083 732294 : (resultRelInfo->ri_RootResultRelInfo == NULL ||
1084 727476 : (resultRelInfo->ri_TrigDesc &&
1085 1526 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
1086 5014 : ExecPartitionCheck(resultRelInfo, slot, estate, true);
1087 :
1088 11587868 : if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
1089 4112 : {
1090 : /* Perform a speculative insertion. */
1091 : uint32 specToken;
1092 : ItemPointerData conflictTid;
1093 : ItemPointerData invalidItemPtr;
1094 : bool specConflict;
1095 : List *arbiterIndexes;
1096 :
1097 9542 : ItemPointerSetInvalid(&invalidItemPtr);
1098 9542 : arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
1099 :
1100 : /*
1101 : * Do a non-conclusive check for conflicts first.
1102 : *
1103 : * We're not holding any locks yet, so this doesn't guarantee that
1104 : * the later insert won't conflict. But it avoids leaving behind
1105 : * a lot of canceled speculative insertions, if you run a lot of
1106 : * INSERT ON CONFLICT statements that do conflict.
1107 : *
1108 : * We loop back here if we find a conflict below, either during
1109 : * the pre-check, or when we re-check after inserting the tuple
1110 : * speculatively. Better allow interrupts in case some bug makes
1111 : * this an infinite loop.
1112 : */
1113 9552 : vlock:
1114 9552 : CHECK_FOR_INTERRUPTS();
1115 9552 : specConflict = false;
1116 9552 : if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
1117 : &conflictTid, &invalidItemPtr,
1118 : arbiterIndexes))
1119 : {
1120 : /* committed conflict tuple found */
1121 5418 : if (onconflict == ONCONFLICT_UPDATE)
1122 : {
1123 : /*
1124 : * In case of ON CONFLICT DO UPDATE, execute the UPDATE
1125 : * part. Be prepared to retry if the UPDATE fails because
1126 : * of another concurrent UPDATE/DELETE to the conflict
1127 : * tuple.
1128 : */
1129 5206 : TupleTableSlot *returning = NULL;
1130 :
1131 5206 : if (ExecOnConflictUpdate(context, resultRelInfo,
1132 : &conflictTid, slot, canSetTag,
1133 : &returning))
1134 : {
1135 5128 : InstrCountTuples2(&mtstate->ps, 1);
1136 5128 : return returning;
1137 : }
1138 : else
1139 0 : goto vlock;
1140 : }
1141 : else
1142 : {
1143 : /*
1144 : * In case of ON CONFLICT DO NOTHING, do nothing. However,
1145 : * verify that the tuple is visible to the executor's MVCC
1146 : * snapshot at higher isolation levels.
1147 : *
1148 : * Using ExecGetReturningSlot() to store the tuple for the
1149 : * recheck isn't that pretty, but we can't trivially use
1150 : * the input slot, because it might not be of a compatible
1151 : * type. As there's no conflicting usage of
1152 : * ExecGetReturningSlot() in the DO NOTHING case...
1153 : */
1154 : Assert(onconflict == ONCONFLICT_NOTHING);
1155 212 : ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
1156 : ExecGetReturningSlot(estate, resultRelInfo));
1157 192 : InstrCountTuples2(&mtstate->ps, 1);
1158 192 : return NULL;
1159 : }
1160 : }
1161 :
1162 : /*
1163 : * Before we start insertion proper, acquire our "speculative
1164 : * insertion lock". Others can use that to wait for us to decide
1165 : * if we're going to go ahead with the insertion, instead of
1166 : * waiting for the whole transaction to complete.
1167 : */
1168 4128 : specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
1169 :
1170 : /* insert the tuple, with the speculative token */
1171 4128 : table_tuple_insert_speculative(resultRelationDesc, slot,
1172 : estate->es_output_cid,
1173 : 0,
1174 : NULL,
1175 : specToken);
1176 :
1177 : /* insert index entries for tuple */
1178 4128 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1179 : slot, estate, false, true,
1180 : &specConflict,
1181 : arbiterIndexes,
1182 : false);
1183 :
1184 : /* adjust the tuple's state accordingly */
1185 4122 : table_tuple_complete_speculative(resultRelationDesc, slot,
1186 4122 : specToken, !specConflict);
1187 :
1188 : /*
1189 : * Wake up anyone waiting for our decision. They will re-check
1190 : * the tuple, see that it's no longer speculative, and wait on our
1191 : * XID as if this was a regularly inserted tuple all along. Or if
1192 : * we killed the tuple, they will see it's dead, and proceed as if
1193 : * the tuple never existed.
1194 : */
1195 4122 : SpeculativeInsertionLockRelease(GetCurrentTransactionId());
1196 :
1197 : /*
1198 : * If there was a conflict, start from the beginning. We'll do
1199 : * the pre-check again, which will now find the conflicting tuple
1200 : * (unless it aborts before we get there).
1201 : */
1202 4122 : if (specConflict)
1203 : {
1204 10 : list_free(recheckIndexes);
1205 10 : goto vlock;
1206 : }
1207 :
1208 : /* Since there was no insertion conflict, we're done */
1209 : }
1210 : else
1211 : {
1212 : /* insert the tuple normally */
1213 11578326 : table_tuple_insert(resultRelationDesc, slot,
1214 : estate->es_output_cid,
1215 : 0, NULL);
1216 :
1217 : /* insert index entries for tuple */
1218 11578290 : if (resultRelInfo->ri_NumIndices > 0)
1219 3080104 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1220 : slot, estate, false,
1221 : false, NULL, NIL,
1222 : false);
1223 : }
1224 : }
1225 :
1226 11583696 : if (canSetTag)
1227 11582518 : (estate->es_processed)++;
1228 :
1229 : /*
1230 : * If this insert is the result of a partition key update that moved the
1231 : * tuple to a new partition, put this row into the transition NEW TABLE,
1232 : * if there is one. We need to do this separately for DELETE and INSERT
1233 : * because they happen on different tables.
1234 : */
1235 11583696 : ar_insert_trig_tcs = mtstate->mt_transition_capture;
1236 11583696 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
1237 54 : && mtstate->mt_transition_capture->tcs_update_new_table)
1238 : {
1239 48 : ExecARUpdateTriggers(estate, resultRelInfo,
1240 : NULL, NULL,
1241 : NULL,
1242 : NULL,
1243 : slot,
1244 : NULL,
1245 48 : mtstate->mt_transition_capture,
1246 : false);
1247 :
1248 : /*
1249 : * We've already captured the NEW TABLE row, so make sure any AR
1250 : * INSERT trigger fired below doesn't capture it again.
1251 : */
1252 48 : ar_insert_trig_tcs = NULL;
1253 : }
1254 :
1255 : /* AFTER ROW INSERT Triggers */
1256 11583696 : ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
1257 : ar_insert_trig_tcs);
1258 :
1259 11583696 : list_free(recheckIndexes);
1260 :
1261 : /*
1262 : * Check any WITH CHECK OPTION constraints from parent views. We are
1263 : * required to do this after testing all constraints and uniqueness
1264 : * violations per the SQL spec, so we do it after actually inserting the
1265 : * record into the heap and all indexes.
1266 : *
1267 : * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
1268 : * tuple will never be seen, if it violates the WITH CHECK OPTION.
1269 : *
1270 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1271 : * are looking for at this point.
1272 : */
1273 11583696 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1274 382 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1275 :
1276 : /* Process RETURNING if present */
1277 11583550 : if (resultRelInfo->ri_projectReturning)
1278 : {
1279 3580 : TupleTableSlot *oldSlot = NULL;
1280 :
1281 : /*
1282 : * If this is part of a cross-partition UPDATE, and the RETURNING list
1283 : * refers to any OLD columns, ExecDelete() will have saved the tuple
1284 : * deleted from the original partition, which we must use here to
1285 : * compute the OLD column values. Otherwise, all OLD column values
1286 : * will be NULL.
1287 : */
1288 3580 : if (context->cpDeletedSlot)
1289 : {
1290 : TupleConversionMap *tupconv_map;
1291 :
1292 : /*
1293 : * Convert the OLD tuple to the new partition's format/slot, if
1294 : * needed. Note that ExceDelete() already converted it to the
1295 : * root's partition's format/slot.
1296 : */
1297 44 : oldSlot = context->cpDeletedSlot;
1298 44 : tupconv_map = ExecGetRootToChildMap(resultRelInfo, estate);
1299 44 : if (tupconv_map != NULL)
1300 : {
1301 14 : oldSlot = execute_attr_map_slot(tupconv_map->attrMap,
1302 : oldSlot,
1303 : ExecGetReturningSlot(estate,
1304 : resultRelInfo));
1305 :
1306 14 : oldSlot->tts_tableOid = context->cpDeletedSlot->tts_tableOid;
1307 14 : ItemPointerCopy(&context->cpDeletedSlot->tts_tid, &oldSlot->tts_tid);
1308 : }
1309 : }
1310 :
1311 3580 : result = ExecProcessReturning(context, resultRelInfo, CMD_INSERT,
1312 : oldSlot, slot, planSlot);
1313 :
1314 : /*
1315 : * For a cross-partition UPDATE, release the old tuple, first making
1316 : * sure that the result slot has a local copy of any pass-by-reference
1317 : * values.
1318 : */
1319 3568 : if (context->cpDeletedSlot)
1320 : {
1321 44 : ExecMaterializeSlot(result);
1322 44 : ExecClearTuple(oldSlot);
1323 44 : if (context->cpDeletedSlot != oldSlot)
1324 14 : ExecClearTuple(context->cpDeletedSlot);
1325 44 : context->cpDeletedSlot = NULL;
1326 : }
1327 : }
1328 :
1329 11583538 : if (inserted_tuple)
1330 808 : *inserted_tuple = slot;
1331 11583538 : if (insert_destrel)
1332 808 : *insert_destrel = resultRelInfo;
1333 :
1334 11583538 : return result;
1335 : }
1336 :
1337 : /* ----------------------------------------------------------------
1338 : * ExecBatchInsert
1339 : *
1340 : * Insert multiple tuples in an efficient way.
1341 : * Currently, this handles inserting into a foreign table without
1342 : * RETURNING clause.
1343 : * ----------------------------------------------------------------
1344 : */
1345 : static void
1346 56 : ExecBatchInsert(ModifyTableState *mtstate,
1347 : ResultRelInfo *resultRelInfo,
1348 : TupleTableSlot **slots,
1349 : TupleTableSlot **planSlots,
1350 : int numSlots,
1351 : EState *estate,
1352 : bool canSetTag)
1353 : {
1354 : int i;
1355 56 : int numInserted = numSlots;
1356 56 : TupleTableSlot *slot = NULL;
1357 : TupleTableSlot **rslots;
1358 :
1359 : /*
1360 : * insert into foreign table: let the FDW do it
1361 : */
1362 56 : rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1363 : resultRelInfo,
1364 : slots,
1365 : planSlots,
1366 : &numInserted);
1367 :
1368 344 : for (i = 0; i < numInserted; i++)
1369 : {
1370 288 : slot = rslots[i];
1371 :
1372 : /*
1373 : * AFTER ROW Triggers might reference the tableoid column, so
1374 : * (re-)initialize tts_tableOid before evaluating them.
1375 : */
1376 288 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1377 :
1378 : /* AFTER ROW INSERT Triggers */
1379 288 : ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1380 288 : mtstate->mt_transition_capture);
1381 :
1382 : /*
1383 : * Check any WITH CHECK OPTION constraints from parent views. See the
1384 : * comment in ExecInsert.
1385 : */
1386 288 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1387 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1388 : }
1389 :
1390 56 : if (canSetTag && numInserted > 0)
1391 56 : estate->es_processed += numInserted;
1392 :
1393 : /* Clean up all the slots, ready for the next batch */
1394 344 : for (i = 0; i < numSlots; i++)
1395 : {
1396 288 : ExecClearTuple(slots[i]);
1397 288 : ExecClearTuple(planSlots[i]);
1398 : }
1399 56 : resultRelInfo->ri_NumSlots = 0;
1400 56 : }
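 : /*
 : * Editorial note: with postgres_fdw, batching is requested via the
 : * batch_size option, e.g.
 : *
 : *     ALTER FOREIGN TABLE ft OPTIONS (ADD batch_size '100');
 : *
 : * which makes ri_BatchSize > 1, so ExecInsert() accumulates slots and
 : * flushes them through this function, or through ExecPendingInserts() at
 : * points where the pending rows must become visible (for example, before
 : * triggers run).
 : */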
1401 :
1402 : /*
1403 : * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1404 : */
1405 : static void
1406 34 : ExecPendingInserts(EState *estate)
1407 : {
1408 : ListCell *l1,
1409 : *l2;
1410 :
1411 70 : forboth(l1, estate->es_insert_pending_result_relations,
1412 : l2, estate->es_insert_pending_modifytables)
1413 : {
1414 36 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1415 36 : ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1416 :
1417 : Assert(mtstate);
1418 36 : ExecBatchInsert(mtstate, resultRelInfo,
1419 : resultRelInfo->ri_Slots,
1420 : resultRelInfo->ri_PlanSlots,
1421 : resultRelInfo->ri_NumSlots,
1422 36 : estate, mtstate->canSetTag);
1423 : }
1424 :
1425 34 : list_free(estate->es_insert_pending_result_relations);
1426 34 : list_free(estate->es_insert_pending_modifytables);
1427 34 : estate->es_insert_pending_result_relations = NIL;
1428 34 : estate->es_insert_pending_modifytables = NIL;
1429 34 : }
1430 :
1431 : /*
1432 : * ExecDeletePrologue -- subroutine for ExecDelete
1433 : *
1434 : * Prepare executor state for DELETE. Actually, the only thing we have to do
1435 : * here is execute BEFORE ROW triggers. We return false if one of them makes
1436 : * the delete a no-op; otherwise, return true.
1437 : */
1438 : static bool
1439 1646072 : ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1440 : ItemPointer tupleid, HeapTuple oldtuple,
1441 : TupleTableSlot **epqreturnslot, TM_Result *result)
1442 : {
1443 1646072 : if (result)
1444 1556 : *result = TM_Ok;
1445 :
1446 : /* BEFORE ROW DELETE triggers */
1447 1646072 : if (resultRelInfo->ri_TrigDesc &&
1448 7044 : resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1449 : {
1450 : /* Flush any pending inserts, so rows are visible to the triggers */
1451 394 : if (context->estate->es_insert_pending_result_relations != NIL)
1452 2 : ExecPendingInserts(context->estate);
1453 :
1454 394 : return ExecBRDeleteTriggers(context->estate, context->epqstate,
1455 : resultRelInfo, tupleid, oldtuple,
1456 : epqreturnslot, result, &context->tmfd);
1457 : }
1458 :
1459 1645678 : return true;
1460 : }
1461 :
1462 : /*
1463 : * ExecDeleteAct -- subroutine for ExecDelete
1464 : *
1465 : * Actually delete the tuple from a plain table.
1466 : *
1467 : * Caller is in charge of doing EvalPlanQual as necessary
1468 : */
1469 : static TM_Result
1470 1645876 : ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1471 : ItemPointer tupleid, bool changingPart)
1472 : {
1473 1645876 : EState *estate = context->estate;
1474 :
1475 1645876 : return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1476 : estate->es_output_cid,
1477 : estate->es_snapshot,
1478 : estate->es_crosscheck_snapshot,
1479 : true /* wait for commit */ ,
1480 : &context->tmfd,
1481 : changingPart);
1482 : }
1483 :
1484 : /*
1485 : * ExecDeleteEpilogue -- subroutine for ExecDelete
1486 : *
1487 : * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1488 : * including the UPDATE triggers if the deletion is being done as part of a
1489 : * cross-partition tuple move.
1490 : */
1491 : static void
1492 1645824 : ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1493 : ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1494 : {
1495 1645824 : ModifyTableState *mtstate = context->mtstate;
1496 1645824 : EState *estate = context->estate;
1497 : TransitionCaptureState *ar_delete_trig_tcs;
1498 :
1499 : /*
1500 : * If this delete is the result of a partition key update that moved the
1501 : * tuple to a new partition, put this row into the transition OLD TABLE,
1502 : * if there is one. We need to do this separately for DELETE and INSERT
1503 : * because they happen on different tables.
1504 : */
1505 1645824 : ar_delete_trig_tcs = mtstate->mt_transition_capture;
1506 1645824 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1507 54 : mtstate->mt_transition_capture->tcs_update_old_table)
1508 : {
1509 48 : ExecARUpdateTriggers(estate, resultRelInfo,
1510 : NULL, NULL,
1511 : tupleid, oldtuple,
1512 48 : NULL, NULL, mtstate->mt_transition_capture,
1513 : false);
1514 :
1515 : /*
1516 : * We've already captured the OLD TABLE row, so make sure any AR
1517 : * DELETE trigger fired below doesn't capture it again.
1518 : */
1519 48 : ar_delete_trig_tcs = NULL;
1520 : }
1521 :
1522 : /* AFTER ROW DELETE Triggers */
1523 1645824 : ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1524 : ar_delete_trig_tcs, changingPart);
1525 1645824 : }
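 : /*
 : * Editorial example of the cross-partition case above: given a
 : * hypothetical statement-level trigger
 : *
 : *     CREATE TRIGGER parted_audit AFTER UPDATE ON parted
 : *       REFERENCING OLD TABLE AS ot NEW TABLE AS nt
 : *       FOR EACH STATEMENT EXECUTE FUNCTION audit_fn();
 : *
 : * an UPDATE that moves a row to another partition executes as a DELETE
 : * plus an INSERT, yet the row must appear once in "ot" (captured here) and
 : * once in "nt" (captured in ExecInsert), not in any DELETE/INSERT
 : * transition tables.
 : */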
1526 :
1527 : /* ----------------------------------------------------------------
1528 : * ExecDelete
1529 : *
1530 : * DELETE is like UPDATE, except that we delete the tuple and no
1531 : * index modifications are needed.
1532 : *
1533 : * When deleting from a table, tupleid identifies the tuple to delete and
1534 : * oldtuple is NULL. When deleting through a view INSTEAD OF trigger,
1535 : * oldtuple is passed to the triggers and identifies what to delete, and
1536 : * tupleid is invalid. When deleting from a foreign table, tupleid is
1537 : * invalid; the FDW has to figure out which row to delete using data from
1538 : * the planSlot. oldtuple is passed to foreign table triggers; it is
1539 : * NULL when the foreign table has no relevant triggers. We use
 1540 : * tupleDeleted to indicate whether the tuple is actually deleted;
 1541 : * callers can use it to decide whether to continue the operation. When
 1542 : * this DELETE is part of a partition-key UPDATE, the slot returned by
 1543 : * EvalPlanQual() is passed back using the output parameter
 1544 : * epqreturnslot.
1545 : *
1546 : * Returns RETURNING result if any, otherwise NULL.
1547 : * ----------------------------------------------------------------
1548 : */
1549 : static TupleTableSlot *
1550 1645558 : ExecDelete(ModifyTableContext *context,
1551 : ResultRelInfo *resultRelInfo,
1552 : ItemPointer tupleid,
1553 : HeapTuple oldtuple,
1554 : bool processReturning,
1555 : bool changingPart,
1556 : bool canSetTag,
1557 : TM_Result *tmresult,
1558 : bool *tupleDeleted,
1559 : TupleTableSlot **epqreturnslot)
1560 : {
1561 1645558 : EState *estate = context->estate;
1562 1645558 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1563 1645558 : TupleTableSlot *slot = NULL;
1564 : TM_Result result;
1565 : bool saveOld;
1566 :
1567 1645558 : if (tupleDeleted)
1568 1042 : *tupleDeleted = false;
1569 :
1570 : /*
1571 : * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1572 : * done if it says we are.
1573 : */
1574 1645558 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1575 : epqreturnslot, tmresult))
1576 52 : return NULL;
1577 :
1578 : /* INSTEAD OF ROW DELETE Triggers */
1579 1645472 : if (resultRelInfo->ri_TrigDesc &&
1580 6902 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
1581 48 : {
1582 : bool dodelete;
1583 :
1584 : Assert(oldtuple != NULL);
1585 54 : dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1586 :
1587 54 : if (!dodelete) /* "do nothing" */
1588 6 : return NULL;
1589 : }
1590 1645418 : else if (resultRelInfo->ri_FdwRoutine)
1591 : {
1592 : /*
1593 : * delete from foreign table: let the FDW do it
1594 : *
1595 : * We offer the returning slot as a place to store RETURNING data,
1596 : * although the FDW can return some other slot if it wants.
1597 : */
1598 42 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1599 42 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1600 : resultRelInfo,
1601 : slot,
1602 : context->planSlot);
1603 :
1604 42 : if (slot == NULL) /* "do nothing" */
1605 0 : return NULL;
1606 :
1607 : /*
1608 : * RETURNING expressions might reference the tableoid column, so
1609 : * (re)initialize tts_tableOid before evaluating them.
1610 : */
1611 42 : if (TTS_EMPTY(slot))
1612 6 : ExecStoreAllNullTuple(slot);
1613 :
1614 42 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1615 : }
1616 : else
1617 : {
1618 : /*
1619 : * delete the tuple
1620 : *
1621 : * Note: if context->estate->es_crosscheck_snapshot isn't
1622 : * InvalidSnapshot, we check that the row to be deleted is visible to
1623 : * that snapshot, and throw a can't-serialize error if not. This is a
1624 : * special-case behavior needed for referential integrity updates in
1625 : * transaction-snapshot mode transactions.
1626 : */
1627 1645376 : ldelete:
1628 1645380 : result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1629 :
1630 1645344 : if (tmresult)
1631 1008 : *tmresult = result;
1632 :
1633 1645344 : switch (result)
1634 : {
1635 30 : case TM_SelfModified:
1636 :
1637 : /*
1638 : * The target tuple was already updated or deleted by the
1639 : * current command, or by a later command in the current
1640 : * transaction. The former case is possible in a join DELETE
1641 : * where multiple tuples join to the same target tuple. This
1642 : * is somewhat questionable, but Postgres has always allowed
1643 : * it: we just ignore additional deletion attempts.
1644 : *
1645 : * The latter case arises if the tuple is modified by a
1646 : * command in a BEFORE trigger, or perhaps by a command in a
1647 : * volatile function used in the query. In such situations we
1648 : * should not ignore the deletion, but it is equally unsafe to
1649 : * proceed. We don't want to discard the original DELETE
1650 : * while keeping the triggered actions based on its deletion;
1651 : * and it would be no better to allow the original DELETE
1652 : * while discarding updates that it triggered. The row update
1653 : * carries some information that might be important according
1654 : * to business rules; so throwing an error is the only safe
1655 : * course.
1656 : *
1657 : * If a trigger actually intends this type of interaction, it
1658 : * can re-execute the DELETE and then return NULL to cancel
1659 : * the outer delete.
1660 : */
1661 30 : if (context->tmfd.cmax != estate->es_output_cid)
1662 6 : ereport(ERROR,
1663 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1664 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1665 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1666 :
1667 : /* Else, already deleted by self; nothing to do */
1668 24 : return NULL;
1669 :
1670 1645250 : case TM_Ok:
1671 1645250 : break;
1672 :
1673 58 : case TM_Updated:
1674 : {
1675 : TupleTableSlot *inputslot;
1676 : TupleTableSlot *epqslot;
1677 :
1678 58 : if (IsolationUsesXactSnapshot())
1679 2 : ereport(ERROR,
1680 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1681 : errmsg("could not serialize access due to concurrent update")));
1682 :
1683 : /*
1684 : * Already know that we're going to need to do EPQ, so
1685 : * fetch tuple directly into the right slot.
1686 : */
1687 56 : EvalPlanQualBegin(context->epqstate);
1688 56 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1689 : resultRelInfo->ri_RangeTableIndex);
1690 :
1691 56 : result = table_tuple_lock(resultRelationDesc, tupleid,
1692 : estate->es_snapshot,
1693 : inputslot, estate->es_output_cid,
1694 : LockTupleExclusive, LockWaitBlock,
1695 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1696 : &context->tmfd);
1697 :
1698 52 : switch (result)
1699 : {
1700 46 : case TM_Ok:
1701 : Assert(context->tmfd.traversed);
1702 46 : epqslot = EvalPlanQual(context->epqstate,
1703 : resultRelationDesc,
1704 : resultRelInfo->ri_RangeTableIndex,
1705 : inputslot);
1706 46 : if (TupIsNull(epqslot))
1707 : /* Tuple not passing quals anymore, exiting... */
1708 30 : return NULL;
1709 :
1710 : /*
1711 : * If requested, skip delete and pass back the
1712 : * updated row.
1713 : */
1714 16 : if (epqreturnslot)
1715 : {
1716 12 : *epqreturnslot = epqslot;
1717 12 : return NULL;
1718 : }
1719 : else
1720 4 : goto ldelete;
1721 :
1722 4 : case TM_SelfModified:
1723 :
1724 : /*
1725 : * This can be reached when following an update
1726 : * chain from a tuple updated by another session,
1727 : * reaching a tuple that was already updated in
1728 : * this transaction. If previously updated by this
1729 : * command, ignore the delete, otherwise error
1730 : * out.
1731 : *
1732 : * See also TM_SelfModified response to
1733 : * table_tuple_delete() above.
1734 : */
1735 4 : if (context->tmfd.cmax != estate->es_output_cid)
1736 2 : ereport(ERROR,
1737 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1738 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1739 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1740 2 : return NULL;
1741 :
1742 2 : case TM_Deleted:
1743 : /* tuple already deleted; nothing to do */
1744 2 : return NULL;
1745 :
1746 0 : default:
1747 :
1748 : /*
1749 : * TM_Invisible should be impossible because we're
1750 : * waiting for updated row versions, and would
1751 : * already have errored out if the first version
1752 : * is invisible.
1753 : *
1754 : * TM_Updated should be impossible, because we're
1755 : * locking the latest version via
1756 : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
1757 : */
1758 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
1759 : result);
1760 : return NULL;
1761 : }
1762 :
1763 : Assert(false);
1764 : break;
1765 : }
1766 :
1767 6 : case TM_Deleted:
1768 6 : if (IsolationUsesXactSnapshot())
1769 0 : ereport(ERROR,
1770 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1771 : errmsg("could not serialize access due to concurrent delete")));
1772 : /* tuple already deleted; nothing to do */
1773 6 : return NULL;
1774 :
1775 0 : default:
1776 0 : elog(ERROR, "unrecognized table_tuple_delete status: %u",
1777 : result);
1778 : return NULL;
1779 : }
1780 :
1781 : /*
1782 : * Note: Normally one would think that we have to delete index tuples
1783 : * associated with the heap tuple now...
1784 : *
1785 : * ... but in POSTGRES, we have no need to do this because VACUUM will
1786 : * take care of it later. We can't delete index tuples immediately
1787 : * anyway, since the tuple is still visible to other transactions.
1788 : */
1789 : }
1790 :
1791 1645340 : if (canSetTag)
1792 1644148 : (estate->es_processed)++;
1793 :
1794 : /* Tell caller that the delete actually happened. */
1795 1645340 : if (tupleDeleted)
1796 964 : *tupleDeleted = true;
1797 :
1798 1645340 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
1799 :
1800 : /*
1801 : * Process RETURNING if present and if requested.
1802 : *
1803 : * If this is part of a cross-partition UPDATE, and the RETURNING list
1804 : * refers to any OLD column values, save the old tuple here for later
1805 : * processing of the RETURNING list by ExecInsert().
1806 : */
1807 1645486 : saveOld = changingPart && resultRelInfo->ri_projectReturning &&
1808 146 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD;
1809 :
1810 1645340 : if (resultRelInfo->ri_projectReturning && (processReturning || saveOld))
1811 : {
1812 : /*
 1813 : * We have to put the target tuple into a slot, which means we first
 1814 : * have to fetch it. We can use the trigger tuple slot.
1815 : */
1816 : TupleTableSlot *rslot;
1817 :
1818 984 : if (resultRelInfo->ri_FdwRoutine)
1819 : {
1820 : /* FDW must have provided a slot containing the deleted row */
1821 : Assert(!TupIsNull(slot));
1822 : }
1823 : else
1824 : {
1825 970 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1826 970 : if (oldtuple != NULL)
1827 : {
1828 24 : ExecForceStoreHeapTuple(oldtuple, slot, false);
1829 : }
1830 : else
1831 : {
1832 946 : if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
1833 : SnapshotAny, slot))
1834 0 : elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1835 : }
1836 : }
1837 :
1838 : /*
1839 : * If required, save the old tuple for later processing of the
1840 : * RETURNING list by ExecInsert().
1841 : */
1842 984 : if (saveOld)
1843 : {
1844 : TupleConversionMap *tupconv_map;
1845 :
1846 : /*
1847 : * Convert the tuple into the root partition's format/slot, if
1848 : * needed. ExecInsert() will then convert it to the new
1849 : * partition's format/slot, if necessary.
1850 : */
1851 44 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1852 44 : if (tupconv_map != NULL)
1853 : {
1854 18 : ResultRelInfo *rootRelInfo = context->mtstate->rootResultRelInfo;
1855 18 : TupleTableSlot *oldSlot = slot;
1856 :
1857 18 : slot = execute_attr_map_slot(tupconv_map->attrMap,
1858 : slot,
1859 : ExecGetReturningSlot(estate,
1860 : rootRelInfo));
1861 :
1862 18 : slot->tts_tableOid = oldSlot->tts_tableOid;
1863 18 : ItemPointerCopy(&oldSlot->tts_tid, &slot->tts_tid);
1864 : }
1865 :
1866 44 : context->cpDeletedSlot = slot;
1867 :
1868 44 : return NULL;
1869 : }
1870 :
1871 940 : rslot = ExecProcessReturning(context, resultRelInfo, CMD_DELETE,
1872 : slot, NULL, context->planSlot);
1873 :
1874 : /*
1875 : * Before releasing the target tuple again, make sure rslot has a
1876 : * local copy of any pass-by-reference values.
1877 : */
1878 940 : ExecMaterializeSlot(rslot);
1879 :
1880 940 : ExecClearTuple(slot);
1881 :
1882 940 : return rslot;
1883 : }
1884 :
1885 1644356 : return NULL;
1886 : }
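
/*
 * A self-contained model (simplified status enum, hypothetical helpers) of
 * the concurrency handling above: a TM_Updated-style result triggers an
 * EvalPlanQual-style recheck and a retry, while self-modified or
 * already-deleted rows are silently skipped.
 */
#include <stdbool.h>
#include <stdio.h>

typedef enum { ST_OK, ST_SELF_MODIFIED, ST_UPDATED, ST_DELETED } DeleteStatus;

static DeleteStatus try_delete(int attempt)
{
	/* pretend the first attempt races with a concurrent UPDATE */
	return (attempt == 0) ? ST_UPDATED : ST_OK;
}

static bool row_still_passes_quals(void) { return true; }	/* EPQ stand-in */

int main(void)
{
	for (int attempt = 0;; attempt++)
	{
		switch (try_delete(attempt))
		{
			case ST_OK:
				printf("deleted on attempt %d\n", attempt + 1);
				return 0;
			case ST_UPDATED:
				if (!row_still_passes_quals())
					return 0;	/* quals fail on the new version: skip */
				continue;		/* like "goto ldelete" above */
			case ST_SELF_MODIFIED:
			case ST_DELETED:
				return 0;		/* already gone: nothing to do */
		}
	}
}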
1887 :
1888 : /*
1889 : * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
1890 : *
1891 : * This works by first deleting the old tuple from the current partition,
1892 : * followed by inserting the new tuple into the root parent table, that is,
1893 : * mtstate->rootResultRelInfo. It will be re-routed from there to the
1894 : * correct partition.
1895 : *
1896 : * Returns true if the tuple has been successfully moved, or if it's found
1897 : * that the tuple was concurrently deleted so there's nothing more to do
1898 : * for the caller.
1899 : *
1900 : * False is returned if the tuple we're trying to move is found to have been
1901 : * concurrently updated. In that case, the caller must check if the updated
1902 : * tuple that's returned in *retry_slot still needs to be re-routed, and call
1903 : * this function again or perform a regular update accordingly. For MERGE,
1904 : * the updated tuple is not returned in *retry_slot; it has its own retry
1905 : * logic.
1906 : */
1907 : static bool
1908 1090 : ExecCrossPartitionUpdate(ModifyTableContext *context,
1909 : ResultRelInfo *resultRelInfo,
1910 : ItemPointer tupleid, HeapTuple oldtuple,
1911 : TupleTableSlot *slot,
1912 : bool canSetTag,
1913 : UpdateContext *updateCxt,
1914 : TM_Result *tmresult,
1915 : TupleTableSlot **retry_slot,
1916 : TupleTableSlot **inserted_tuple,
1917 : ResultRelInfo **insert_destrel)
1918 : {
1919 1090 : ModifyTableState *mtstate = context->mtstate;
1920 1090 : EState *estate = mtstate->ps.state;
1921 : TupleConversionMap *tupconv_map;
1922 : bool tuple_deleted;
1923 1090 : TupleTableSlot *epqslot = NULL;
1924 :
1925 1090 : context->cpDeletedSlot = NULL;
1926 1090 : context->cpUpdateReturningSlot = NULL;
1927 1090 : *retry_slot = NULL;
1928 :
1929 : /*
1930 : * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
1931 : * to migrate to a different partition. Maybe this can be implemented
1932 : * some day, but it seems a fringe feature with little redeeming value.
1933 : */
1934 1090 : if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
1935 0 : ereport(ERROR,
1936 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1937 : errmsg("invalid ON UPDATE specification"),
1938 : errdetail("The result tuple would appear in a different partition than the original tuple.")));
1939 :
1940 : /*
1941 : * When an UPDATE is run directly on a leaf partition, simply fail with a
1942 : * partition constraint violation error.
1943 : */
1944 1090 : if (resultRelInfo == mtstate->rootResultRelInfo)
1945 48 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1946 :
1947 : /* Initialize tuple routing info if not already done. */
1948 1042 : if (mtstate->mt_partition_tuple_routing == NULL)
1949 : {
1950 662 : Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
1951 : MemoryContext oldcxt;
1952 :
1953 : /* Things built here have to last for the query duration. */
1954 662 : oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1955 :
1956 662 : mtstate->mt_partition_tuple_routing =
1957 662 : ExecSetupPartitionTupleRouting(estate, rootRel);
1958 :
1959 : /*
1960 : * Before a partition's tuple can be re-routed, it must first be
1961 : * converted to the root's format, so we'll need a slot for storing
1962 : * such tuples.
1963 : */
1964 : Assert(mtstate->mt_root_tuple_slot == NULL);
1965 662 : mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
1966 :
1967 662 : MemoryContextSwitchTo(oldcxt);
1968 : }
1969 :
1970 : /*
1971 : * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
1972 : * We want to return rows from INSERT.
1973 : */
1974 1042 : ExecDelete(context, resultRelInfo,
1975 : tupleid, oldtuple,
1976 : false, /* processReturning */
1977 : true, /* changingPart */
1978 : false, /* canSetTag */
1979 : tmresult, &tuple_deleted, &epqslot);
1980 :
1981 : /*
 1982 : * If for some reason the DELETE didn't happen (e.g. a trigger prevented
 1983 : * it, it was already deleted by self, or it was concurrently deleted by
1984 : * another transaction), then we should skip the insert as well;
1985 : * otherwise, an UPDATE could cause an increase in the total number of
1986 : * rows across all partitions, which is clearly wrong.
1987 : *
1988 : * For a normal UPDATE, the case where the tuple has been the subject of a
1989 : * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
1990 : * machinery, but for an UPDATE that we've translated into a DELETE from
1991 : * this partition and an INSERT into some other partition, that's not
1992 : * available, because CTID chains can't span relation boundaries. We
1993 : * mimic the semantics to a limited extent by skipping the INSERT if the
1994 : * DELETE fails to find a tuple. This ensures that two concurrent
1995 : * attempts to UPDATE the same tuple at the same time can't turn one tuple
1996 : * into two, and that an UPDATE of a just-deleted tuple can't resurrect
1997 : * it.
1998 : */
1999 1040 : if (!tuple_deleted)
2000 : {
2001 : /*
 2002 : * epqslot will typically be NULL. But when ExecDelete() finds that
2003 : * another transaction has concurrently updated the same row, it
2004 : * re-fetches the row, skips the delete, and epqslot is set to the
2005 : * re-fetched tuple slot. In that case, we need to do all the checks
2006 : * again. For MERGE, we leave everything to the caller (it must do
2007 : * additional rechecking, and might end up executing a different
2008 : * action entirely).
2009 : */
2010 76 : if (mtstate->operation == CMD_MERGE)
2011 34 : return *tmresult == TM_Ok;
2012 42 : else if (TupIsNull(epqslot))
2013 36 : return true;
2014 : else
2015 : {
2016 : /* Fetch the most recent version of old tuple. */
2017 : TupleTableSlot *oldSlot;
2018 :
2019 : /* ... but first, make sure ri_oldTupleSlot is initialized. */
2020 6 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2021 0 : ExecInitUpdateProjection(mtstate, resultRelInfo);
2022 6 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2023 6 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
2024 : tupleid,
2025 : SnapshotAny,
2026 : oldSlot))
2027 0 : elog(ERROR, "failed to fetch tuple being updated");
2028 : /* and project the new tuple to retry the UPDATE with */
2029 6 : *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
2030 : oldSlot);
2031 6 : return false;
2032 : }
2033 : }
2034 :
2035 : /*
 2036 : * resultRelInfo is one of the per-relation resultRelInfos. So we should
 2037 : * convert the tuple into the root's tuple descriptor if needed, since
 2038 : * ExecInsert() starts its search from the root.
2039 : */
2040 964 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
2041 964 : if (tupconv_map != NULL)
2042 314 : slot = execute_attr_map_slot(tupconv_map->attrMap,
2043 : slot,
2044 : mtstate->mt_root_tuple_slot);
2045 :
2046 : /* Tuple routing starts from the root table. */
2047 836 : context->cpUpdateReturningSlot =
2048 964 : ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
2049 : inserted_tuple, insert_destrel);
2050 :
2051 : /*
 2052 : * Reset the transition state that may have been written by
2053 : * INSERT.
2054 : */
2055 836 : if (mtstate->mt_transition_capture)
2056 54 : mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
2057 :
2058 : /* We're done moving. */
2059 836 : return true;
2060 : }
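
/*
 * A self-contained model (hypothetical helpers, plain C) of the row-movement
 * invariant above: the INSERT into the destination partition is attempted
 * only when the DELETE from the source partition actually removed a row, so
 * a concurrent delete can never let one logical row become two.
 */
#include <stdbool.h>
#include <stdio.h>

static bool delete_from_source(bool concurrently_deleted, bool *tuple_deleted)
{
	*tuple_deleted = !concurrently_deleted;	/* like ExecDelete's output arg */
	return *tuple_deleted;
}

static void insert_via_root(void) { puts("re-routed into new partition"); }

static void move_row(bool concurrently_deleted)
{
	bool		tuple_deleted;

	if (!delete_from_source(concurrently_deleted, &tuple_deleted))
	{
		puts("delete skipped; insert skipped too");
		return;
	}
	insert_via_root();
}

int main(void)
{
	move_row(false);	/* normal move: delete, then insert via the root */
	move_row(true);		/* concurrently deleted: no resurrection */
	return 0;
}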
2061 :
2062 : /*
2063 : * ExecUpdatePrologue -- subroutine for ExecUpdate
2064 : *
2065 : * Prepare executor state for UPDATE. This includes running BEFORE ROW
2066 : * triggers. We return false if one of them makes the update a no-op;
2067 : * otherwise, return true.
2068 : */
2069 : static bool
2070 323330 : ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2071 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2072 : TM_Result *result)
2073 : {
2074 323330 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2075 :
2076 323330 : if (result)
2077 2134 : *result = TM_Ok;
2078 :
2079 323330 : ExecMaterializeSlot(slot);
2080 :
2081 : /*
2082 : * Open the table's indexes, if we have not done so already, so that we
2083 : * can add new index entries for the updated tuple.
2084 : */
2085 323330 : if (resultRelationDesc->rd_rel->relhasindex &&
2086 232492 : resultRelInfo->ri_IndexRelationDescs == NULL)
2087 8656 : ExecOpenIndices(resultRelInfo, false);
2088 :
2089 : /* BEFORE ROW UPDATE triggers */
2090 323330 : if (resultRelInfo->ri_TrigDesc &&
2091 6288 : resultRelInfo->ri_TrigDesc->trig_update_before_row)
2092 : {
2093 : /* Flush any pending inserts, so rows are visible to the triggers */
2094 2602 : if (context->estate->es_insert_pending_result_relations != NIL)
2095 2 : ExecPendingInserts(context->estate);
2096 :
2097 2602 : return ExecBRUpdateTriggers(context->estate, context->epqstate,
2098 : resultRelInfo, tupleid, oldtuple, slot,
2099 : result, &context->tmfd);
2100 : }
2101 :
2102 320728 : return true;
2103 : }
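
/*
 * A self-contained model (hypothetical names) of the flush performed above:
 * batched inserts pending for other result relations are written out before
 * any BEFORE ROW trigger runs, so queries issued by the trigger can see
 * those rows.
 */
#include <stdio.h>

static int pending_inserts = 3;

static void flush_pending_inserts(void)
{
	printf("flushing %d pending insert(s)\n", pending_inserts);
	pending_inserts = 0;
}

static void run_before_row_trigger(void)
{
	/* the trigger's queries now see every previously pending row */
	printf("trigger runs with %d row(s) still invisible\n", pending_inserts);
}

int main(void)
{
	if (pending_inserts > 0)	/* like the es_insert_pending test above */
		flush_pending_inserts();
	run_before_row_trigger();
	return 0;
}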
2104 :
2105 : /*
2106 : * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
2107 : *
2108 : * Apply the final modifications to the tuple slot before the update.
2109 : * (This is split out because we also need it in the foreign-table code path.)
2110 : */
2111 : static void
2112 323040 : ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
2113 : TupleTableSlot *slot,
2114 : EState *estate)
2115 : {
2116 323040 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2117 :
2118 : /*
2119 : * Constraints and GENERATED expressions might reference the tableoid
2120 : * column, so (re-)initialize tts_tableOid before evaluating them.
2121 : */
2122 323040 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2123 :
2124 : /*
2125 : * Compute stored generated columns
2126 : */
2127 323040 : if (resultRelationDesc->rd_att->constr &&
2128 195644 : resultRelationDesc->rd_att->constr->has_generated_stored)
2129 258 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
2130 : CMD_UPDATE);
2131 323040 : }
2132 :
2133 : /*
2134 : * ExecUpdateAct -- subroutine for ExecUpdate
2135 : *
2136 : * Actually update the tuple, when operating on a plain table. If the
2137 : * table is a partition, and the command was called referencing an ancestor
2138 : * partitioned table, this routine migrates the resulting tuple to another
2139 : * partition.
2140 : *
2141 : * The caller is in charge of keeping indexes current as necessary. The
2142 : * caller is also in charge of doing EvalPlanQual if the tuple is found to
2143 : * be concurrently updated. However, in case of a cross-partition update,
2144 : * this routine does it.
2145 : */
2146 : static TM_Result
2147 322848 : ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2148 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2149 : bool canSetTag, UpdateContext *updateCxt)
2150 : {
2151 322848 : EState *estate = context->estate;
2152 322848 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2153 : bool partition_constraint_failed;
2154 : TM_Result result;
2155 :
2156 322848 : updateCxt->crossPartUpdate = false;
2157 :
2158 : /*
2159 : * If we move the tuple to a new partition, we loop back here to recompute
2160 : * GENERATED values (which are allowed to be different across partitions)
2161 : * and recheck any RLS policies and constraints. We do not fire any
2162 : * BEFORE triggers of the new partition, however.
2163 : */
2164 322854 : lreplace:
 2165 : /* Fill in GENERATED columns */
2166 322854 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2167 :
2168 : /* ensure slot is independent, consider e.g. EPQ */
2169 322854 : ExecMaterializeSlot(slot);
2170 :
2171 : /*
2172 : * If partition constraint fails, this row might get moved to another
2173 : * partition, in which case we should check the RLS CHECK policy just
2174 : * before inserting into the new partition, rather than doing it here.
2175 : * This is because a trigger on that partition might again change the row.
2176 : * So skip the WCO checks if the partition constraint fails.
2177 : */
2178 322854 : partition_constraint_failed =
2179 325568 : resultRelationDesc->rd_rel->relispartition &&
2180 2714 : !ExecPartitionCheck(resultRelInfo, slot, estate, false);
2181 :
2182 : /* Check any RLS UPDATE WITH CHECK policies */
2183 322854 : if (!partition_constraint_failed &&
2184 321764 : resultRelInfo->ri_WithCheckOptions != NIL)
2185 : {
2186 : /*
2187 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
2188 : * we are looking for at this point.
2189 : */
2190 492 : ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
2191 : resultRelInfo, slot, estate);
2192 : }
2193 :
2194 : /*
2195 : * If a partition check failed, try to move the row into the right
2196 : * partition.
2197 : */
2198 322800 : if (partition_constraint_failed)
2199 : {
2200 : TupleTableSlot *inserted_tuple,
2201 : *retry_slot;
2202 1090 : ResultRelInfo *insert_destrel = NULL;
2203 :
2204 : /*
2205 : * ExecCrossPartitionUpdate will first DELETE the row from the
2206 : * partition it's currently in and then insert it back into the root
2207 : * table, which will re-route it to the correct partition. However,
2208 : * if the tuple has been concurrently updated, a retry is needed.
2209 : */
2210 1090 : if (ExecCrossPartitionUpdate(context, resultRelInfo,
2211 : tupleid, oldtuple, slot,
2212 : canSetTag, updateCxt,
2213 : &result,
2214 : &retry_slot,
2215 : &inserted_tuple,
2216 : &insert_destrel))
2217 : {
2218 : /* success! */
2219 896 : updateCxt->crossPartUpdate = true;
2220 :
2221 : /*
2222 : * If the partitioned table being updated is referenced in foreign
2223 : * keys, queue up trigger events to check that none of them were
2224 : * violated. No special treatment is needed in
2225 : * non-cross-partition update situations, because the leaf
2226 : * partition's AR update triggers will take care of that. During
2227 : * cross-partition updates implemented as delete on the source
2228 : * partition followed by insert on the destination partition,
2229 : * AR-UPDATE triggers of the root table (that is, the table
2230 : * mentioned in the query) must be fired.
2231 : *
2232 : * NULL insert_destrel means that the move failed to occur, that
 2233 : * is, the update failed, so there is nothing to do in that case.
2234 : */
2235 896 : if (insert_destrel &&
2236 808 : resultRelInfo->ri_TrigDesc &&
2237 362 : resultRelInfo->ri_TrigDesc->trig_update_after_row)
2238 300 : ExecCrossPartitionUpdateForeignKey(context,
2239 : resultRelInfo,
2240 : insert_destrel,
2241 : tupleid, slot,
2242 : inserted_tuple);
2243 :
2244 900 : return TM_Ok;
2245 : }
2246 :
2247 : /*
 2248 : * No luck, a retry is needed. If running MERGE, we do not do so
 2249 : * here; instead we let MERGE handle the retry under its own rules.
2250 : */
2251 16 : if (context->mtstate->operation == CMD_MERGE)
2252 10 : return result;
2253 :
2254 : /*
2255 : * ExecCrossPartitionUpdate installed an updated version of the new
2256 : * tuple in the retry slot; start over.
2257 : */
2258 6 : slot = retry_slot;
2259 6 : goto lreplace;
2260 : }
2261 :
2262 : /*
2263 : * Check the constraints of the tuple. We've already checked the
2264 : * partition constraint above; however, we must still ensure the tuple
2265 : * passes all other constraints, so we will call ExecConstraints() and
2266 : * have it validate all remaining checks.
2267 : */
2268 321710 : if (resultRelationDesc->rd_att->constr)
2269 195020 : ExecConstraints(resultRelInfo, slot, estate);
2270 :
2271 : /*
2272 : * replace the heap tuple
2273 : *
2274 : * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
2275 : * the row to be updated is visible to that snapshot, and throw a
2276 : * can't-serialize error if not. This is a special-case behavior needed
2277 : * for referential integrity updates in transaction-snapshot mode
2278 : * transactions.
2279 : */
2280 321636 : result = table_tuple_update(resultRelationDesc, tupleid, slot,
2281 : estate->es_output_cid,
2282 : estate->es_snapshot,
2283 : estate->es_crosscheck_snapshot,
2284 : true /* wait for commit */ ,
2285 : &context->tmfd, &updateCxt->lockmode,
2286 : &updateCxt->updateIndexes);
2287 :
2288 321612 : return result;
2289 : }
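
/*
 * A condensed, hypothetical sketch (not part of nodeModifyTable.c) of the
 * update primitive used above: table_tuple_update() reports through
 * updateIndexes which index entries the caller must (re)insert.  It assumes
 * the backend headers included above; the function name is illustrative
 * only.
 */
static TM_Result
sketch_update_once(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
				   ItemPointer tupleid, TupleTableSlot *slot,
				   UpdateContext *updateCxt)
{
	EState	   *estate = context->estate;

	return table_tuple_update(resultRelInfo->ri_RelationDesc, tupleid, slot,
							  estate->es_output_cid,
							  estate->es_snapshot,
							  estate->es_crosscheck_snapshot,
							  true,	/* wait for concurrent committers */
							  &context->tmfd,
							  &updateCxt->lockmode,
							  &updateCxt->updateIndexes);
}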
2290 :
2291 : /*
2292 : * ExecUpdateEpilogue -- subroutine for ExecUpdate
2293 : *
2294 : * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2295 : * returns indicating that the tuple was updated.
2296 : */
2297 : static void
2298 321664 : ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
2299 : ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2300 : HeapTuple oldtuple, TupleTableSlot *slot)
2301 : {
2302 321664 : ModifyTableState *mtstate = context->mtstate;
2303 321664 : List *recheckIndexes = NIL;
2304 :
2305 : /* insert index entries for tuple if necessary */
2306 321664 : if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
2307 175452 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
2308 : slot, context->estate,
2309 : true, false,
2310 : NULL, NIL,
2311 175452 : (updateCxt->updateIndexes == TU_Summarizing));
2312 :
2313 : /* AFTER ROW UPDATE Triggers */
2314 321574 : ExecARUpdateTriggers(context->estate, resultRelInfo,
2315 : NULL, NULL,
2316 : tupleid, oldtuple, slot,
2317 : recheckIndexes,
2318 321574 : mtstate->operation == CMD_INSERT ?
2319 : mtstate->mt_oc_transition_capture :
2320 : mtstate->mt_transition_capture,
2321 : false);
2322 :
2323 321574 : list_free(recheckIndexes);
2324 :
2325 : /*
2326 : * Check any WITH CHECK OPTION constraints from parent views. We are
2327 : * required to do this after testing all constraints and uniqueness
2328 : * violations per the SQL spec, so we do it after actually updating the
2329 : * record in the heap and all indexes.
2330 : *
2331 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2332 : * are looking for at this point.
2333 : */
2334 321574 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2335 466 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2336 : slot, context->estate);
2337 321492 : }
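
/*
 * A self-contained model (simplified enum mirroring TU_UpdateIndexes) of the
 * index-maintenance decision above: no new index entries for a HOT-style
 * update, all entries for an ordinary update, and only summarizing indexes
 * (e.g. BRIN) when just those need refreshing.
 */
#include <stdbool.h>
#include <stdio.h>

typedef enum { U_NONE, U_SUMMARIZING, U_ALL } UpdateIndexes;

static void insert_index_entries(bool only_summarizing)
{
	printf("inserting %s index entries\n",
		   only_summarizing ? "summarizing" : "all");
}

static void maintain_indexes(int num_indexes, UpdateIndexes mode)
{
	if (num_indexes > 0 && mode != U_NONE)
		insert_index_entries(mode == U_SUMMARIZING);
	else
		puts("no index maintenance needed");
}

int main(void)
{
	maintain_indexes(2, U_NONE);		/* HOT update: nothing to do */
	maintain_indexes(2, U_ALL);			/* ordinary update */
	maintain_indexes(2, U_SUMMARIZING);	/* only BRIN-style indexes */
	return 0;
}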
2338 :
2339 : /*
2340 : * Queues up an update event using the target root partitioned table's
2341 : * trigger to check that a cross-partition update hasn't broken any foreign
2342 : * keys pointing into it.
2343 : */
2344 : static void
2345 300 : ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
2346 : ResultRelInfo *sourcePartInfo,
2347 : ResultRelInfo *destPartInfo,
2348 : ItemPointer tupleid,
2349 : TupleTableSlot *oldslot,
2350 : TupleTableSlot *newslot)
2351 : {
2352 : ListCell *lc;
2353 : ResultRelInfo *rootRelInfo;
2354 : List *ancestorRels;
2355 :
2356 300 : rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2357 300 : ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2358 :
2359 : /*
2360 : * For any foreign keys that point directly into a non-root ancestors of
2361 : * the source partition, we can in theory fire an update event to enforce
2362 : * those constraints using their triggers, if we could tell that both the
2363 : * source and the destination partitions are under the same ancestor. But
2364 : * for now, we simply report an error that those cannot be enforced.
2365 : */
2366 654 : foreach(lc, ancestorRels)
2367 : {
2368 360 : ResultRelInfo *rInfo = lfirst(lc);
2369 360 : TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2370 360 : bool has_noncloned_fkey = false;
2371 :
2372 : /* Root ancestor's triggers will be processed. */
2373 360 : if (rInfo == rootRelInfo)
2374 294 : continue;
2375 :
2376 66 : if (trigdesc && trigdesc->trig_update_after_row)
2377 : {
2378 228 : for (int i = 0; i < trigdesc->numtriggers; i++)
2379 : {
2380 168 : Trigger *trig = &trigdesc->triggers[i];
2381 :
2382 174 : if (!trig->tgisclone &&
2383 6 : RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
2384 : {
2385 6 : has_noncloned_fkey = true;
2386 6 : break;
2387 : }
2388 : }
2389 : }
2390 :
2391 66 : if (has_noncloned_fkey)
2392 6 : ereport(ERROR,
2393 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2394 : errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2395 : errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2396 : RelationGetRelationName(rInfo->ri_RelationDesc),
2397 : RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
2398 : errhint("Consider defining the foreign key on table \"%s\".",
2399 : RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
2400 : }
2401 :
2402 : /* Perform the root table's triggers. */
2403 294 : ExecARUpdateTriggers(context->estate,
2404 : rootRelInfo, sourcePartInfo, destPartInfo,
2405 : tupleid, NULL, newslot, NIL, NULL, true);
2406 294 : }
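
/*
 * A self-contained model (hypothetical trigger descriptor) of the ancestor
 * scan above: a foreign key pointing at a non-root ancestor shows up as a
 * trigger that is not a clone, and the cross-partition move must then be
 * rejected.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct Trig { bool is_clone; bool is_fk_pk_trigger; } Trig;

static bool has_noncloned_fkey(const Trig *trigs, int ntrigs)
{
	for (int i = 0; i < ntrigs; i++)
		if (!trigs[i].is_clone && trigs[i].is_fk_pk_trigger)
			return true;
	return false;
}

int main(void)
{
	Trig		ancestor_trigs[] = {
		{true, true},	/* cloned from the root: fine */
		{false, true},	/* FK aimed directly at this ancestor: reject */
	};

	if (has_noncloned_fkey(ancestor_trigs, 2))
		puts("ERROR: cannot move tuple across partitions");
	return 0;
}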
2407 :
2408 : /* ----------------------------------------------------------------
2409 : * ExecUpdate
2410 : *
2411 : * note: we can't run UPDATE queries with transactions
2412 : * off because UPDATEs are actually INSERTs and our
2413 : * scan will mistakenly loop forever, updating the tuple
 2414 : * it just inserted. This should be fixed, but until it
 2415 : * is, we don't want to get stuck in an infinite loop
 2416 : * that corrupts your database.
2417 : *
2418 : * When updating a table, tupleid identifies the tuple to update and
2419 : * oldtuple is NULL. When updating through a view INSTEAD OF trigger,
2420 : * oldtuple is passed to the triggers and identifies what to update, and
2421 : * tupleid is invalid. When updating a foreign table, tupleid is
2422 : * invalid; the FDW has to figure out which row to update using data from
2423 : * the planSlot. oldtuple is passed to foreign table triggers; it is
2424 : * NULL when the foreign table has no relevant triggers.
2425 : *
2426 : * oldSlot contains the old tuple value.
2427 : * slot contains the new tuple value to be stored.
2428 : * planSlot is the output of the ModifyTable's subplan; we use it
2429 : * to access values from other input tables (for RETURNING),
2430 : * row-ID junk columns, etc.
2431 : *
2432 : * Returns RETURNING result if any, otherwise NULL. On exit, if tupleid
2433 : * had identified the tuple to update, it will identify the tuple
2434 : * actually updated after EvalPlanQual.
2435 : * ----------------------------------------------------------------
2436 : */
2437 : static TupleTableSlot *
2438 321196 : ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2439 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *oldSlot,
2440 : TupleTableSlot *slot, bool canSetTag)
2441 : {
2442 321196 : EState *estate = context->estate;
2443 321196 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2444 321196 : UpdateContext updateCxt = {0};
2445 : TM_Result result;
2446 :
2447 : /*
2448 : * abort the operation if not running transactions
2449 : */
2450 321196 : if (IsBootstrapProcessingMode())
2451 0 : elog(ERROR, "cannot UPDATE during bootstrap");
2452 :
2453 : /*
2454 : * Prepare for the update. This includes BEFORE ROW triggers, so we're
2455 : * done if it says we are.
2456 : */
2457 321196 : if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
2458 138 : return NULL;
2459 :
2460 : /* INSTEAD OF ROW UPDATE Triggers */
2461 321022 : if (resultRelInfo->ri_TrigDesc &&
2462 5762 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2463 : {
2464 126 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2465 : oldtuple, slot))
2466 18 : return NULL; /* "do nothing" */
2467 : }
2468 320896 : else if (resultRelInfo->ri_FdwRoutine)
2469 : {
 2470 : /* Fill in GENERATED columns */
2471 186 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2472 :
2473 : /*
2474 : * update in foreign table: let the FDW do it
2475 : */
2476 186 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2477 : resultRelInfo,
2478 : slot,
2479 : context->planSlot);
2480 :
2481 186 : if (slot == NULL) /* "do nothing" */
2482 2 : return NULL;
2483 :
2484 : /*
2485 : * AFTER ROW Triggers or RETURNING expressions might reference the
2486 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
2487 : * them. (This covers the case where the FDW replaced the slot.)
2488 : */
2489 184 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2490 : }
2491 : else
2492 : {
2493 : ItemPointerData lockedtid;
2494 :
2495 : /*
2496 : * If we generate a new candidate tuple after EvalPlanQual testing, we
2497 : * must loop back here to try again. (We don't need to redo triggers,
2498 : * however. If there are any BEFORE triggers then trigger.c will have
2499 : * done table_tuple_lock to lock the correct tuple, so there's no need
2500 : * to do them again.)
2501 : */
2502 320710 : redo_act:
2503 320810 : lockedtid = *tupleid;
2504 320810 : result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2505 : canSetTag, &updateCxt);
2506 :
2507 : /*
2508 : * If ExecUpdateAct reports that a cross-partition update was done,
2509 : * then the RETURNING tuple (if any) has been projected and there's
2510 : * nothing else for us to do.
2511 : */
2512 320494 : if (updateCxt.crossPartUpdate)
2513 884 : return context->cpUpdateReturningSlot;
2514 :
2515 319738 : switch (result)
2516 : {
2517 84 : case TM_SelfModified:
2518 :
2519 : /*
2520 : * The target tuple was already updated or deleted by the
2521 : * current command, or by a later command in the current
2522 : * transaction. The former case is possible in a join UPDATE
2523 : * where multiple tuples join to the same target tuple. This
2524 : * is pretty questionable, but Postgres has always allowed it:
2525 : * we just execute the first update action and ignore
2526 : * additional update attempts.
2527 : *
2528 : * The latter case arises if the tuple is modified by a
2529 : * command in a BEFORE trigger, or perhaps by a command in a
2530 : * volatile function used in the query. In such situations we
2531 : * should not ignore the update, but it is equally unsafe to
2532 : * proceed. We don't want to discard the original UPDATE
2533 : * while keeping the triggered actions based on it; and we
2534 : * have no principled way to merge this update with the
2535 : * previous ones. So throwing an error is the only safe
2536 : * course.
2537 : *
2538 : * If a trigger actually intends this type of interaction, it
2539 : * can re-execute the UPDATE (assuming it can figure out how)
2540 : * and then return NULL to cancel the outer update.
2541 : */
2542 84 : if (context->tmfd.cmax != estate->es_output_cid)
2543 6 : ereport(ERROR,
2544 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2545 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2546 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2547 :
2548 : /* Else, already updated by self; nothing to do */
2549 78 : return NULL;
2550 :
2551 319494 : case TM_Ok:
2552 319494 : break;
2553 :
2554 152 : case TM_Updated:
2555 : {
2556 : TupleTableSlot *inputslot;
2557 : TupleTableSlot *epqslot;
2558 :
2559 152 : if (IsolationUsesXactSnapshot())
2560 4 : ereport(ERROR,
2561 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2562 : errmsg("could not serialize access due to concurrent update")));
2563 :
2564 : /*
2565 : * Already know that we're going to need to do EPQ, so
2566 : * fetch tuple directly into the right slot.
2567 : */
2568 148 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2569 : resultRelInfo->ri_RangeTableIndex);
2570 :
2571 148 : result = table_tuple_lock(resultRelationDesc, tupleid,
2572 : estate->es_snapshot,
2573 : inputslot, estate->es_output_cid,
2574 : updateCxt.lockmode, LockWaitBlock,
2575 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2576 : &context->tmfd);
2577 :
2578 144 : switch (result)
2579 : {
2580 134 : case TM_Ok:
2581 : Assert(context->tmfd.traversed);
2582 :
2583 134 : epqslot = EvalPlanQual(context->epqstate,
2584 : resultRelationDesc,
2585 : resultRelInfo->ri_RangeTableIndex,
2586 : inputslot);
2587 134 : if (TupIsNull(epqslot))
2588 : /* Tuple not passing quals anymore, exiting... */
2589 34 : return NULL;
2590 :
2591 : /* Make sure ri_oldTupleSlot is initialized. */
2592 100 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2593 0 : ExecInitUpdateProjection(context->mtstate,
2594 : resultRelInfo);
2595 :
2596 100 : if (resultRelInfo->ri_needLockTagTuple)
2597 : {
2598 2 : UnlockTuple(resultRelationDesc,
2599 : &lockedtid, InplaceUpdateTupleLock);
2600 2 : LockTuple(resultRelationDesc,
2601 : tupleid, InplaceUpdateTupleLock);
2602 : }
2603 :
2604 : /* Fetch the most recent version of old tuple. */
2605 100 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2606 100 : if (!table_tuple_fetch_row_version(resultRelationDesc,
2607 : tupleid,
2608 : SnapshotAny,
2609 : oldSlot))
2610 0 : elog(ERROR, "failed to fetch tuple being updated");
2611 100 : slot = ExecGetUpdateNewTuple(resultRelInfo,
2612 : epqslot, oldSlot);
2613 100 : goto redo_act;
2614 :
2615 2 : case TM_Deleted:
2616 : /* tuple already deleted; nothing to do */
2617 2 : return NULL;
2618 :
2619 8 : case TM_SelfModified:
2620 :
2621 : /*
2622 : * This can be reached when following an update
2623 : * chain from a tuple updated by another session,
2624 : * reaching a tuple that was already updated in
2625 : * this transaction. If previously modified by
2626 : * this command, ignore the redundant update,
2627 : * otherwise error out.
2628 : *
2629 : * See also TM_SelfModified response to
2630 : * table_tuple_update() above.
2631 : */
2632 8 : if (context->tmfd.cmax != estate->es_output_cid)
2633 2 : ereport(ERROR,
2634 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2635 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2636 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2637 6 : return NULL;
2638 :
2639 0 : default:
2640 : /* see table_tuple_lock call in ExecDelete() */
2641 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2642 : result);
2643 : return NULL;
2644 : }
2645 : }
2646 :
2647 : break;
2648 :
2649 8 : case TM_Deleted:
2650 8 : if (IsolationUsesXactSnapshot())
2651 0 : ereport(ERROR,
2652 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2653 : errmsg("could not serialize access due to concurrent delete")));
2654 : /* tuple already deleted; nothing to do */
2655 8 : return NULL;
2656 :
2657 0 : default:
2658 0 : elog(ERROR, "unrecognized table_tuple_update status: %u",
2659 : result);
2660 : return NULL;
2661 : }
2662 : }
2663 :
2664 319774 : if (canSetTag)
2665 319176 : (estate->es_processed)++;
2666 :
2667 319774 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2668 : slot);
2669 :
2670 : /* Process RETURNING if present */
2671 319614 : if (resultRelInfo->ri_projectReturning)
2672 2382 : return ExecProcessReturning(context, resultRelInfo, CMD_UPDATE,
2673 : oldSlot, slot, context->planSlot);
2674 :
2675 317232 : return NULL;
2676 : }
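
/*
 * A self-contained model (plain C, hypothetical names) of the cmax test used
 * in the TM_SelfModified branches above: a tuple touched by the current
 * command itself is skipped quietly, while one touched by a command the
 * current one triggered raises an error.
 */
#include <stdio.h>

typedef unsigned int CommandId;

static void handle_self_modified(CommandId tuple_cmax, CommandId current_cid)
{
	if (tuple_cmax != current_cid)
		puts("ERROR: tuple already modified by operation triggered "
			 "by the current command");
	else
		puts("already updated by self; ignore");
}

int main(void)
{
	handle_self_modified(5, 5);	/* same command: ignore quietly */
	handle_self_modified(7, 5);	/* later (triggered) command: error */
	return 0;
}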
2677 :
2678 : /*
2679 : * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
2680 : *
2681 : * Try to lock tuple for update as part of speculative insertion. If
2682 : * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
2683 : * (but still lock row, even though it may not satisfy estate's
2684 : * snapshot).
2685 : *
2686 : * Returns true if we're done (with or without an update), or false if
2687 : * the caller must retry the INSERT from scratch.
2688 : */
2689 : static bool
2690 5206 : ExecOnConflictUpdate(ModifyTableContext *context,
2691 : ResultRelInfo *resultRelInfo,
2692 : ItemPointer conflictTid,
2693 : TupleTableSlot *excludedSlot,
2694 : bool canSetTag,
2695 : TupleTableSlot **returning)
2696 : {
2697 5206 : ModifyTableState *mtstate = context->mtstate;
2698 5206 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2699 5206 : Relation relation = resultRelInfo->ri_RelationDesc;
2700 5206 : ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
2701 5206 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
2702 : TM_FailureData tmfd;
2703 : LockTupleMode lockmode;
2704 : TM_Result test;
2705 : Datum xminDatum;
2706 : TransactionId xmin;
2707 : bool isnull;
2708 :
2709 : /*
2710 : * Parse analysis should have blocked ON CONFLICT for all system
2711 : * relations, which includes these. There's no fundamental obstacle to
2712 : * supporting this; we'd just need to handle LOCKTAG_TUPLE like the other
2713 : * ExecUpdate() caller.
2714 : */
2715 : Assert(!resultRelInfo->ri_needLockTagTuple);
2716 :
2717 : /* Determine lock mode to use */
2718 5206 : lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
2719 :
2720 : /*
2721 : * Lock tuple for update. Don't follow updates when tuple cannot be
2722 : * locked without doing so. A row locking conflict here means our
2723 : * previous conclusion that the tuple is conclusively committed is not
2724 : * true anymore.
2725 : */
2726 5206 : test = table_tuple_lock(relation, conflictTid,
2727 5206 : context->estate->es_snapshot,
2728 5206 : existing, context->estate->es_output_cid,
2729 : lockmode, LockWaitBlock, 0,
2730 : &tmfd);
2731 5206 : switch (test)
2732 : {
2733 5182 : case TM_Ok:
2734 : /* success! */
2735 5182 : break;
2736 :
2737 24 : case TM_Invisible:
2738 :
2739 : /*
 2740 : * This can occur when a just-inserted tuple is updated again in
 2741 : * the same command, e.g. because multiple rows with the same
 2742 : * conflicting key values are inserted.
2743 : *
2744 : * This is somewhat similar to the ExecUpdate() TM_SelfModified
2745 : * case. We do not want to proceed because it would lead to the
2746 : * same row being updated a second time in some unspecified order,
2747 : * and in contrast to plain UPDATEs there's no historical behavior
2748 : * to break.
2749 : *
2750 : * It is the user's responsibility to prevent this situation from
2751 : * occurring. These problems are why the SQL standard similarly
2752 : * specifies that for SQL MERGE, an exception must be raised in
2753 : * the event of an attempt to update the same row twice.
2754 : */
2755 24 : xminDatum = slot_getsysattr(existing,
2756 : MinTransactionIdAttributeNumber,
2757 : &isnull);
2758 : Assert(!isnull);
2759 24 : xmin = DatumGetTransactionId(xminDatum);
2760 :
2761 24 : if (TransactionIdIsCurrentTransactionId(xmin))
2762 24 : ereport(ERROR,
2763 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2764 : /* translator: %s is a SQL command name */
2765 : errmsg("%s command cannot affect row a second time",
2766 : "ON CONFLICT DO UPDATE"),
2767 : errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
2768 :
2769 : /* This shouldn't happen */
2770 0 : elog(ERROR, "attempted to lock invisible tuple");
2771 : break;
2772 :
2773 0 : case TM_SelfModified:
2774 :
2775 : /*
 2776 : * This state should never be reached. Because a dirty snapshot is
 2777 : * used to find conflicting tuples, speculative insertion wouldn't
 2778 : * have seen this row as one to conflict with.
2779 : */
2780 0 : elog(ERROR, "unexpected self-updated tuple");
2781 : break;
2782 :
2783 0 : case TM_Updated:
2784 0 : if (IsolationUsesXactSnapshot())
2785 0 : ereport(ERROR,
2786 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2787 : errmsg("could not serialize access due to concurrent update")));
2788 :
2789 : /*
 2790 : * As long as we don't support an UPDATE of INSERT ON CONFLICT for
 2791 : * a partitioned table, we shouldn't reach a case where the tuple
 2792 : * to be locked has been moved to another partition due to a
 2793 : * concurrent update of the partition key.
2794 : */
2795 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2796 :
2797 : /*
2798 : * Tell caller to try again from the very start.
2799 : *
2800 : * It does not make sense to use the usual EvalPlanQual() style
2801 : * loop here, as the new version of the row might not conflict
2802 : * anymore, or the conflicting tuple has actually been deleted.
2803 : */
2804 0 : ExecClearTuple(existing);
2805 0 : return false;
2806 :
2807 0 : case TM_Deleted:
2808 0 : if (IsolationUsesXactSnapshot())
2809 0 : ereport(ERROR,
2810 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2811 : errmsg("could not serialize access due to concurrent delete")));
2812 :
2813 : /* see TM_Updated case */
2814 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2815 0 : ExecClearTuple(existing);
2816 0 : return false;
2817 :
2818 0 : default:
2819 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
2820 : }
2821 :
2822 : /* Success, the tuple is locked. */
2823 :
2824 : /*
2825 : * Verify that the tuple is visible to our MVCC snapshot if the current
2826 : * isolation level mandates that.
2827 : *
2828 : * It's not sufficient to rely on the check within ExecUpdate() as e.g.
2829 : * CONFLICT ... WHERE clause may prevent us from reaching that.
2830 : *
2831 : * This means we only ever continue when a new command in the current
2832 : * transaction could see the row, even though in READ COMMITTED mode the
2833 : * tuple will not be visible according to the current statement's
2834 : * snapshot. This is in line with the way UPDATE deals with newer tuple
2835 : * versions.
2836 : */
2837 5182 : ExecCheckTupleVisible(context->estate, relation, existing);
2838 :
2839 : /*
2840 : * Make tuple and any needed join variables available to ExecQual and
2841 : * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
2842 : * the target's existing tuple is installed in the scantuple. EXCLUDED
2843 : * has been made to reference INNER_VAR in setrefs.c, but there is no
2844 : * other redirection.
2845 : */
2846 5182 : econtext->ecxt_scantuple = existing;
2847 5182 : econtext->ecxt_innertuple = excludedSlot;
2848 5182 : econtext->ecxt_outertuple = NULL;
2849 :
2850 5182 : if (!ExecQual(onConflictSetWhere, econtext))
2851 : {
2852 32 : ExecClearTuple(existing); /* see return below */
2853 32 : InstrCountFiltered1(&mtstate->ps, 1);
2854 32 : return true; /* done with the tuple */
2855 : }
2856 :
2857 5150 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2858 : {
2859 : /*
2860 : * Check target's existing tuple against UPDATE-applicable USING
2861 : * security barrier quals (if any), enforced here as RLS checks/WCOs.
2862 : *
2863 : * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
2864 : * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
2865 : * but that's almost the extent of its special handling for ON
2866 : * CONFLICT DO UPDATE.
2867 : *
2868 : * The rewriter will also have associated UPDATE applicable straight
2869 : * RLS checks/WCOs for the benefit of the ExecUpdate() call that
2870 : * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
2871 : * kinds, so there is no danger of spurious over-enforcement in the
2872 : * INSERT or UPDATE path.
2873 : */
2874 60 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
2875 : existing,
2876 : mtstate->ps.state);
2877 : }
2878 :
2879 : /* Project the new tuple version */
2880 5126 : ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
2881 :
2882 : /*
2883 : * Note that it is possible that the target tuple has been modified in
2884 : * this session, after the above table_tuple_lock. We choose to not error
2885 : * out in that case, in line with ExecUpdate's treatment of similar cases.
2886 : * This can happen if an UPDATE is triggered from within ExecQual(),
2887 : * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
2888 : * wCTE in the ON CONFLICT's SET.
2889 : */
2890 :
2891 : /* Execute UPDATE with projection */
2892 10222 : *returning = ExecUpdate(context, resultRelInfo,
2893 : conflictTid, NULL, existing,
2894 5126 : resultRelInfo->ri_onConflict->oc_ProjSlot,
2895 : canSetTag);
2896 :
2897 : /*
2898 : * Clear out existing tuple, as there might not be another conflict among
2899 : * the next input rows. Don't want to hold resources till the end of the
2900 : * query. First though, make sure that the returning slot, if any, has a
2901 : * local copy of any OLD pass-by-reference values, if it refers to any OLD
2902 : * columns.
2903 : */
2904 5096 : if (*returning != NULL &&
2905 222 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
2906 6 : ExecMaterializeSlot(*returning);
2907 :
2908 5096 : ExecClearTuple(existing);
2909 :
2910 5096 : return true;
2911 : }
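
/*
 * A self-contained model (hypothetical helpers) of the contract above:
 * ExecOnConflictUpdate-style code returns true when the conflicting row was
 * handled (updated, or filtered out by the DO UPDATE ... WHERE clause) and
 * false when the whole INSERT must be retried from scratch.
 */
#include <stdbool.h>
#include <stdio.h>

typedef enum { L_OK, L_UPDATED_AWAY, L_DELETED } LockOutcome;

static bool on_conflict_update(LockOutcome lock, bool where_passes)
{
	if (lock != L_OK)
		return false;		/* conflicting row moved on: retry the INSERT */
	if (!where_passes)
		return true;		/* row locked but filtered: done, no update */
	puts("updating conflicting row");
	return true;
}

int main(void)
{
	/* first attempt: the conflicting row was concurrently updated away */
	if (!on_conflict_update(L_UPDATED_AWAY, true))
		puts("retrying INSERT from scratch");

	/* retry: the lock succeeds and the WHERE clause passes */
	if (on_conflict_update(L_OK, true))
		puts("done");
	return 0;
}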
2912 :
2913 : /*
2914 : * Perform MERGE.
2915 : */
2916 : static TupleTableSlot *
2917 14692 : ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2918 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
2919 : {
2920 14692 : TupleTableSlot *rslot = NULL;
2921 : bool matched;
2922 :
2923 : /*-----
2924 : * If we are dealing with a WHEN MATCHED case, tupleid or oldtuple is
2925 : * valid, depending on whether the result relation is a table or a view.
2926 : * We execute the first action for which the additional WHEN MATCHED AND
2927 : * quals pass. If an action without quals is found, that action is
2928 : * executed.
2929 : *
2930 : * Similarly, in the WHEN NOT MATCHED BY SOURCE case, tupleid or oldtuple
2931 : * is valid, and we look at the given WHEN NOT MATCHED BY SOURCE actions
2932 : * in sequence until one passes. This is almost identical to the WHEN
2933 : * MATCHED case, and both cases are handled by ExecMergeMatched().
2934 : *
2935 : * Finally, in the WHEN NOT MATCHED [BY TARGET] case, both tupleid and
2936 : * oldtuple are invalid, and we look at the given WHEN NOT MATCHED [BY
2937 : * TARGET] actions in sequence until one passes.
2938 : *
2939 : * Things get interesting in case of concurrent update/delete of the
2940 : * target tuple. Such concurrent update/delete is detected while we are
2941 : * executing a WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action.
2942 : *
2943 : * A concurrent update can:
2944 : *
2945 : * 1. modify the target tuple so that the results from checking any
2946 : * additional quals attached to WHEN MATCHED or WHEN NOT MATCHED BY
2947 : * SOURCE actions potentially change, but the result from the join
2948 : * quals does not change.
2949 : *
2950 : * In this case, we are still dealing with the same kind of match
2951 : * (MATCHED or NOT MATCHED BY SOURCE). We recheck the same list of
2952 : * actions from the start and choose the first one that satisfies the
2953 : * new target tuple.
2954 : *
2955 : * 2. modify the target tuple in the WHEN MATCHED case so that the join
2956 : * quals no longer pass and hence the source and target tuples no
2957 : * longer match.
2958 : *
2959 : * In this case, we are now dealing with a NOT MATCHED case, and we
2960 : * process both WHEN NOT MATCHED BY SOURCE and WHEN NOT MATCHED [BY
2961 : * TARGET] actions. First ExecMergeMatched() processes the list of
2962 : * WHEN NOT MATCHED BY SOURCE actions in sequence until one passes,
2963 : * then ExecMergeNotMatched() processes any WHEN NOT MATCHED [BY
2964 : * TARGET] actions in sequence until one passes. Thus we may execute
2965 : * two actions; one of each kind.
2966 : *
2967 : * Thus we support concurrent updates that turn MATCHED candidate rows
2968 : * into NOT MATCHED rows. However, we do not attempt to support cases
2969 : * that would turn NOT MATCHED rows into MATCHED rows, or which would
2970 : * cause a target row to match a different source row.
2971 : *
2972 : * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED
2973 : * [BY TARGET].
2974 : *
2975 : * ExecMergeMatched() takes care of following the update chain and
2976 : * re-finding the qualifying WHEN MATCHED or WHEN NOT MATCHED BY SOURCE
2977 : * action, as long as the target tuple still exists. If the target tuple
2978 : * gets deleted or a concurrent update causes the join quals to fail, it
2979 : * returns a matched status of false and we call ExecMergeNotMatched().
2980 : * Given that ExecMergeMatched() always makes progress by following the
2981 : * update chain and we never switch from ExecMergeNotMatched() to
2982 : * ExecMergeMatched(), there is no risk of a livelock.
2983 : */
2984 14692 : matched = tupleid != NULL || oldtuple != NULL;
2985 14692 : if (matched)
2986 12044 : rslot = ExecMergeMatched(context, resultRelInfo, tupleid, oldtuple,
2987 : canSetTag, &matched);
2988 :
2989 : /*
2990 : * Deal with the NOT MATCHED case (either a NOT MATCHED tuple from the
2991 : * join, or a previously MATCHED tuple for which ExecMergeMatched() set
2992 : * "matched" to false, indicating that it no longer matches).
2993 : */
2994 14602 : if (!matched)
2995 : {
2996 : /*
2997 : * If a concurrent update turned a MATCHED case into a NOT MATCHED
2998 : * case, and we have both WHEN NOT MATCHED BY SOURCE and WHEN NOT
2999 : * MATCHED [BY TARGET] actions, and there is a RETURNING clause,
3000 : * ExecMergeMatched() may have already executed a WHEN NOT MATCHED BY
3001 : * SOURCE action, and computed the row to return. If so, we cannot
3002 : * execute a WHEN NOT MATCHED [BY TARGET] action now, so mark it as
3003 : * pending (to be processed on the next call to ExecModifyTable()).
3004 : * Otherwise, just process the action now.
3005 : */
3006 2664 : if (rslot == NULL)
3007 2662 : rslot = ExecMergeNotMatched(context, resultRelInfo, canSetTag);
3008 : else
3009 2 : context->mtstate->mt_merge_pending_not_matched = context->planSlot;
3010 : }
3011 :
3012 14548 : return rslot;
3013 : }
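 : /*
 :  * Illustration only, not part of the original file: a hypothetical MERGE
 :  * statement exercising all three action lists discussed above (table and
 :  * column names are invented):
 :  *
 :  *     MERGE INTO target t
 :  *     USING source s ON t.id = s.id
 :  *     WHEN MATCHED AND s.balance = 0 THEN DELETE
 :  *     WHEN MATCHED THEN UPDATE SET balance = s.balance
 :  *     WHEN NOT MATCHED BY SOURCE THEN DELETE
 :  *     WHEN NOT MATCHED THEN INSERT (id, balance) VALUES (s.id, s.balance);
 :  *
 :  * For each row emitted by the join, ExecMerge() dispatches on the presence
 :  * of row-locating info: the first two actions form the WHEN MATCHED list
 :  * and the third the WHEN NOT MATCHED BY SOURCE list (both handled by
 :  * ExecMergeMatched()), while the last is handled by ExecMergeNotMatched().
 :  */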
3014 :
3015 : /*
3016 : * Check and execute the first qualifying MATCHED or NOT MATCHED BY SOURCE
3017 : * action, depending on whether the join quals are satisfied. If the target
3018 : * relation is a table, the current target tuple is identified by tupleid.
3019 : * Otherwise, if the target relation is a view, oldtuple is the current target
3020 : * tuple from the view.
3021 : *
3022 : * We start from the first WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action
3023 : * and check if the WHEN quals pass, if any. If the WHEN quals for the first
3024 : * action do not pass, we check the second, then the third and so on. If we
3025 : * reach the end without finding a qualifying action, we return NULL.
3026 : * Otherwise, we execute the qualifying action and return its RETURNING
3027 : * result, if any, or NULL.
3028 : *
3029 : * On entry, "*matched" is assumed to be true. If a concurrent update or
3030 : * delete is detected that causes the join quals to no longer pass, we set it
3031 : * to false, indicating that the caller should process any NOT MATCHED [BY
3032 : * TARGET] actions.
3033 : *
3034 : * After a concurrent update, we restart from the first action to look for a
3035 : * new qualifying action to execute. If the join quals originally passed, and
3036 : * the concurrent update caused them to no longer pass, then we switch from
3037 : * the MATCHED to the NOT MATCHED BY SOURCE list of actions before restarting
3038 : * (and setting "*matched" to false). As a result we may execute a WHEN NOT
3039 : * MATCHED BY SOURCE action, and set "*matched" to false, causing the caller
3040 : * to also execute a WHEN NOT MATCHED [BY TARGET] action.
3041 : */
3042 : static TupleTableSlot *
3043 12044 : ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3044 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag,
3045 : bool *matched)
3046 : {
3047 12044 : ModifyTableState *mtstate = context->mtstate;
3048 12044 : List **mergeActions = resultRelInfo->ri_MergeActions;
3049 : ItemPointerData lockedtid;
3050 : List *actionStates;
3051 12044 : TupleTableSlot *newslot = NULL;
3052 12044 : TupleTableSlot *rslot = NULL;
3053 12044 : EState *estate = context->estate;
3054 12044 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3055 : bool isNull;
3056 12044 : EPQState *epqstate = &mtstate->mt_epqstate;
3057 : ListCell *l;
3058 :
3059 : /* Expect matched to be true on entry */
3060 : Assert(*matched);
3061 :
3062 : /*
3063 : * If there are no WHEN MATCHED or WHEN NOT MATCHED BY SOURCE actions, we
3064 : * are done.
3065 : */
3066 12044 : if (mergeActions[MERGE_WHEN_MATCHED] == NIL &&
3067 1200 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] == NIL)
3068 528 : return NULL;
3069 :
3070 : /*
3071 : * Make tuple and any needed join variables available to ExecQual and
3072 : * ExecProject. The target's existing tuple is installed in the scantuple.
3073 : * This target relation's slot is required only in the case of a MATCHED
3074 : * or NOT MATCHED BY SOURCE tuple and UPDATE/DELETE actions.
3075 : */
3076 11516 : econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
3077 11516 : econtext->ecxt_innertuple = context->planSlot;
3078 11516 : econtext->ecxt_outertuple = NULL;
3079 :
3080 : /*
3081 : * This routine is only invoked for matched target rows, so we should
3082 : * either have the tupleid of the target row, or an old tuple from the
3083 : * target wholerow junk attr.
3084 : */
3085 : Assert(tupleid != NULL || oldtuple != NULL);
3086 11516 : ItemPointerSetInvalid(&lockedtid);
3087 11516 : if (oldtuple != NULL)
3088 : {
3089 : Assert(!resultRelInfo->ri_needLockTagTuple);
3090 96 : ExecForceStoreHeapTuple(oldtuple, resultRelInfo->ri_oldTupleSlot,
3091 : false);
3092 : }
3093 : else
3094 : {
3095 11420 : if (resultRelInfo->ri_needLockTagTuple)
3096 : {
3097 : /*
3098 : * This locks even for CMD_DELETE, for CMD_NOTHING, and for tuples
3099 : * that don't satisfy mas_whenqual. MERGE on system catalogs is a
3100 : * minor use case, so don't bother optimizing those.
3101 : */
3102 7490 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3103 : InplaceUpdateTupleLock);
3104 7490 : lockedtid = *tupleid;
3105 : }
3106 11420 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
3107 : tupleid,
3108 : SnapshotAny,
3109 : resultRelInfo->ri_oldTupleSlot))
3110 0 : elog(ERROR, "failed to fetch the target tuple");
3111 : }
3112 :
3113 : /*
3114 : * Test the join condition. If it's satisfied, perform a MATCHED action.
3115 : * Otherwise, perform a NOT MATCHED BY SOURCE action.
3116 : *
3117 : * Note that this join condition will be NULL if there are no NOT MATCHED
3118 : * BY SOURCE actions --- see transform_MERGE_to_join(). In that case, we
3119 : * need only consider MATCHED actions here.
3120 : */
3121 11516 : if (ExecQual(resultRelInfo->ri_MergeJoinCondition, econtext))
3122 11334 : actionStates = mergeActions[MERGE_WHEN_MATCHED];
3123 : else
3124 182 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3125 :
3126 11516 : lmerge_matched:
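 : /*
 :  * Retry point: after a concurrent update is handled below, control jumps
 :  * back here and rescans actionStates from the start, possibly after
 :  * switching to the NOT MATCHED BY SOURCE list of actions.
 :  */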
3127 :
3128 20522 : foreach(l, actionStates)
3129 : {
3130 11644 : MergeActionState *relaction = (MergeActionState *) lfirst(l);
3131 11644 : CmdType commandType = relaction->mas_action->commandType;
3132 : TM_Result result;
3133 11644 : UpdateContext updateCxt = {0};
3134 :
3135 : /*
3136 : * Test condition, if any.
3137 : *
3138 : * In the absence of any condition, we perform the action
3139 : * unconditionally (no need to check separately since ExecQual() will
3140 : * return true if there are no conditions to evaluate).
3141 : */
3142 11644 : if (!ExecQual(relaction->mas_whenqual, econtext))
3143 8942 : continue;
3144 :
3145 : /*
3146 : * Check if the existing target tuple meets the USING checks of
3147 : * UPDATE/DELETE RLS policies. If those checks fail, we throw an
3148 : * error.
3149 : *
3150 : * The WITH CHECK quals for UPDATE RLS policies are applied in
3151 : * ExecUpdateAct() and hence we need not do anything special to handle
3152 : * them.
3153 : *
3154 : * NOTE: We must do this after WHEN quals are evaluated, so that we
3155 : * check policies only when they matter.
3156 : */
3157 2702 : if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
3158 : {
3159 90 : ExecWithCheckOptions(commandType == CMD_UPDATE ?
3160 : WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
3161 : resultRelInfo,
3162 : resultRelInfo->ri_oldTupleSlot,
3163 90 : context->mtstate->ps.state);
3164 : }
3165 :
3166 : /* Perform stated action */
3167 2678 : switch (commandType)
3168 : {
3169 2134 : case CMD_UPDATE:
3170 :
3171 : /*
3172 : * Project the output tuple, and use that to update the table.
3173 : * We don't need to filter out junk attributes, because the
3174 : * UPDATE action's targetlist doesn't have any.
3175 : */
3176 2134 : newslot = ExecProject(relaction->mas_proj);
3177 :
3178 2134 : mtstate->mt_merge_action = relaction;
3179 2134 : if (!ExecUpdatePrologue(context, resultRelInfo,
3180 : tupleid, NULL, newslot, &result))
3181 : {
3182 18 : if (result == TM_Ok)
3183 156 : goto out; /* "do nothing" */
3184 :
3185 12 : break; /* concurrent update/delete */
3186 : }
3187 :
3188 : /* INSTEAD OF ROW UPDATE Triggers */
3189 2116 : if (resultRelInfo->ri_TrigDesc &&
3190 334 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
3191 : {
3192 78 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
3193 : oldtuple, newslot))
3194 0 : goto out; /* "do nothing" */
3195 : }
3196 : else
3197 : {
3198 : /* checked ri_needLockTagTuple above */
3199 : Assert(oldtuple == NULL);
3200 :
3201 2038 : result = ExecUpdateAct(context, resultRelInfo, tupleid,
3202 : NULL, newslot, canSetTag,
3203 : &updateCxt);
3204 :
3205 : /*
3206 : * As in ExecUpdate(), if ExecUpdateAct() reports that a
3207 : * cross-partition update was done, then there's nothing
3208 : * else for us to do --- the UPDATE has been turned into a
3209 : * DELETE and an INSERT, and we must not perform any of
3210 : * the usual post-update tasks. Also, the RETURNING tuple
3211 : * (if any) has been projected, so we can just return
3212 : * that.
3213 : */
3214 2018 : if (updateCxt.crossPartUpdate)
3215 : {
3216 134 : mtstate->mt_merge_updated += 1;
3217 134 : rslot = context->cpUpdateReturningSlot;
3218 134 : goto out;
3219 : }
3220 : }
3221 :
3222 1962 : if (result == TM_Ok)
3223 : {
3224 1890 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
3225 : tupleid, NULL, newslot);
3226 1878 : mtstate->mt_merge_updated += 1;
3227 : }
3228 1950 : break;
3229 :
3230 514 : case CMD_DELETE:
3231 514 : mtstate->mt_merge_action = relaction;
3232 514 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
3233 : NULL, NULL, &result))
3234 : {
3235 12 : if (result == TM_Ok)
3236 6 : goto out; /* "do nothing" */
3237 :
3238 6 : break; /* concurrent update/delete */
3239 : }
3240 :
3241 : /* INSTEAD OF ROW DELETE Triggers */
3242 502 : if (resultRelInfo->ri_TrigDesc &&
3243 44 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
3244 : {
3245 6 : if (!ExecIRDeleteTriggers(estate, resultRelInfo,
3246 : oldtuple))
3247 0 : goto out; /* "do nothing" */
3248 : }
3249 : else
3250 : {
3251 : /* checked ri_needLockTagTuple above */
3252 : Assert(oldtuple == NULL);
3253 :
3254 496 : result = ExecDeleteAct(context, resultRelInfo, tupleid,
3255 : false);
3256 : }
3257 :
3258 502 : if (result == TM_Ok)
3259 : {
3260 484 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
3261 : false);
3262 484 : mtstate->mt_merge_deleted += 1;
3263 : }
3264 502 : break;
3265 :
3266 30 : case CMD_NOTHING:
3267 : /* Doing nothing is always OK */
3268 30 : result = TM_Ok;
3269 30 : break;
3270 :
3271 0 : default:
3272 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3273 : }
3274 :
3275 2500 : switch (result)
3276 : {
3277 2392 : case TM_Ok:
3278 : /* all good; perform final actions */
3279 2392 : if (canSetTag && commandType != CMD_NOTHING)
3280 2344 : (estate->es_processed)++;
3281 :
3282 2392 : break;
3283 :
3284 32 : case TM_SelfModified:
3285 :
3286 : /*
3287 : * The target tuple was already updated or deleted by the
3288 : * current command, or by a later command in the current
3289 : * transaction. The former case is explicitly disallowed by
3290 : * the SQL standard for MERGE, which insists that the MERGE
3291 : * join condition should not join a target row to more than
3292 : * one source row.
3293 : *
3294 : * The latter case arises if the tuple is modified by a
3295 : * command in a BEFORE trigger, or perhaps by a command in a
3296 : * volatile function used in the query. In such situations we
3297 : * should not ignore the MERGE action, but it is equally
3298 : * unsafe to proceed. We don't want to discard the original
3299 : * MERGE action while keeping the triggered actions based on
3300 : * it; and it would be no better to allow the original MERGE
3301 : * action while discarding the updates that it triggered. So
3302 : * throwing an error is the only safe course.
3303 : */
3304 32 : if (context->tmfd.cmax != estate->es_output_cid)
3305 12 : ereport(ERROR,
3306 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3307 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3308 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3309 :
3310 20 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3311 20 : ereport(ERROR,
3312 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3313 : /* translator: %s is a SQL command name */
3314 : errmsg("%s command cannot affect row a second time",
3315 : "MERGE"),
3316 : errhint("Ensure that not more than one source row matches any one target row.")));
3317 :
3318 : /* This shouldn't happen */
3319 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3320 : break;
3321 :
3322 10 : case TM_Deleted:
3323 10 : if (IsolationUsesXactSnapshot())
3324 0 : ereport(ERROR,
3325 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3326 : errmsg("could not serialize access due to concurrent delete")));
3327 :
3328 : /*
3329 : * If the tuple was already deleted, set matched to false to
3330 : * let caller handle it under NOT MATCHED [BY TARGET] clauses.
3331 : */
3332 10 : *matched = false;
3333 10 : goto out;
3334 :
3335 66 : case TM_Updated:
3336 : {
3337 : bool was_matched;
3338 : Relation resultRelationDesc;
3339 : TupleTableSlot *epqslot,
3340 : *inputslot;
3341 : LockTupleMode lockmode;
3342 :
3343 : /*
3344 : * The target tuple was concurrently updated by some other
3345 : * transaction. If we are currently processing a MATCHED
3346 : * action, use EvalPlanQual() with the new version of the
3347 : * tuple and recheck the join qual, to detect a change
3348 : * from the MATCHED to the NOT MATCHED cases. If we are
3349 : * already processing a NOT MATCHED BY SOURCE action, we
3350 : * skip this (cannot switch from NOT MATCHED BY SOURCE to
3351 : * MATCHED).
3352 : */
3353 66 : was_matched = relaction->mas_action->matchKind == MERGE_WHEN_MATCHED;
3354 66 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
3355 66 : lockmode = ExecUpdateLockMode(estate, resultRelInfo);
3356 :
3357 66 : if (was_matched)
3358 66 : inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
3359 : resultRelInfo->ri_RangeTableIndex);
3360 : else
3361 0 : inputslot = resultRelInfo->ri_oldTupleSlot;
3362 :
3363 66 : result = table_tuple_lock(resultRelationDesc, tupleid,
3364 : estate->es_snapshot,
3365 : inputslot, estate->es_output_cid,
3366 : lockmode, LockWaitBlock,
3367 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
3368 : &context->tmfd);
3369 66 : switch (result)
3370 : {
3371 64 : case TM_Ok:
3372 :
3373 : /*
3374 : * If the tuple was updated and migrated to
3375 : * another partition concurrently, the current
3376 : * MERGE implementation can't follow. There's
3377 : * probably a better way to handle this case, but
3378 : * it'd require recognizing the relation to which
3379 : * the tuple moved, and setting our current
3380 : * resultRelInfo to that.
3381 : */
3382 64 : if (ItemPointerIndicatesMovedPartitions(&context->tmfd.ctid))
3383 0 : ereport(ERROR,
3384 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3385 : errmsg("tuple to be merged was already moved to another partition due to concurrent update")));
3386 :
3387 : /*
3388 : * If this was a MATCHED case, use EvalPlanQual()
3389 : * to recheck the join condition.
3390 : */
3391 64 : if (was_matched)
3392 : {
3393 64 : epqslot = EvalPlanQual(epqstate,
3394 : resultRelationDesc,
3395 : resultRelInfo->ri_RangeTableIndex,
3396 : inputslot);
3397 :
3398 : /*
3399 : * If the subplan didn't return a tuple, then
3400 : * we must be dealing with an inner join for
3401 : * which the join condition no longer matches.
3402 : * This can only happen if there are no NOT
3403 : * MATCHED actions, and so there is nothing
3404 : * more to do.
3405 : */
3406 64 : if (TupIsNull(epqslot))
3407 0 : goto out;
3408 :
3409 : /*
3410 : * If we got a NULL ctid from the subplan, the
3411 : * join quals no longer pass and we switch to
3412 : * the NOT MATCHED BY SOURCE case.
3413 : */
3414 64 : (void) ExecGetJunkAttribute(epqslot,
3415 64 : resultRelInfo->ri_RowIdAttNo,
3416 : &isNull);
3417 64 : if (isNull)
3418 4 : *matched = false;
3419 :
3420 : /*
3421 : * Otherwise, recheck the join quals to see if
3422 : * we need to switch to the NOT MATCHED BY
3423 : * SOURCE case.
3424 : */
3425 64 : if (resultRelInfo->ri_needLockTagTuple)
3426 : {
3427 2 : if (ItemPointerIsValid(&lockedtid))
3428 2 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3429 : InplaceUpdateTupleLock);
3430 2 : LockTuple(resultRelInfo->ri_RelationDesc, &context->tmfd.ctid,
3431 : InplaceUpdateTupleLock);
3432 2 : lockedtid = context->tmfd.ctid;
3433 : }
3434 64 : if (!table_tuple_fetch_row_version(resultRelationDesc,
3435 : &context->tmfd.ctid,
3436 : SnapshotAny,
3437 : resultRelInfo->ri_oldTupleSlot))
3438 0 : elog(ERROR, "failed to fetch the target tuple");
3439 :
3440 64 : if (*matched)
3441 60 : *matched = ExecQual(resultRelInfo->ri_MergeJoinCondition,
3442 : econtext);
3443 :
3444 : /* Switch lists, if necessary */
3445 64 : if (!*matched)
3446 6 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3447 : }
3448 :
3449 : /*
3450 : * Loop back and process the MATCHED or NOT
3451 : * MATCHED BY SOURCE actions from the start.
3452 : */
3453 64 : goto lmerge_matched;
3454 :
3455 0 : case TM_Deleted:
3456 :
3457 : /*
3458 : * tuple already deleted; tell caller to run NOT
3459 : * MATCHED [BY TARGET] actions
3460 : */
3461 0 : *matched = false;
3462 0 : goto out;
3463 :
3464 2 : case TM_SelfModified:
3465 :
3466 : /*
3467 : * This can be reached when following an update
3468 : * chain from a tuple updated by another session,
3469 : * reaching a tuple that was already updated or
3470 : * deleted by the current command, or by a later
3471 : * command in the current transaction. As above,
3472 : * this should always be treated as an error.
3473 : */
3474 2 : if (context->tmfd.cmax != estate->es_output_cid)
3475 0 : ereport(ERROR,
3476 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3477 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3478 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3479 :
3480 2 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3481 2 : ereport(ERROR,
3482 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3483 : /* translator: %s is a SQL command name */
3484 : errmsg("%s command cannot affect row a second time",
3485 : "MERGE"),
3486 : errhint("Ensure that not more than one source row matches any one target row.")));
3487 :
3488 : /* This shouldn't happen */
3489 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3490 : goto out;
3491 :
3492 0 : default:
3493 : /* see table_tuple_lock call in ExecDelete() */
3494 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
3495 : result);
3496 : goto out;
3497 : }
3498 : }
3499 :
3500 0 : case TM_Invisible:
3501 : case TM_WouldBlock:
3502 : case TM_BeingModified:
3503 : /* these should not occur */
3504 0 : elog(ERROR, "unexpected tuple operation result: %d", result);
3505 : break;
3506 : }
3507 :
3508 : /* Process RETURNING if present */
3509 2392 : if (resultRelInfo->ri_projectReturning)
3510 : {
3511 408 : switch (commandType)
3512 : {
3513 174 : case CMD_UPDATE:
3514 174 : rslot = ExecProcessReturning(context,
3515 : resultRelInfo,
3516 : CMD_UPDATE,
3517 : resultRelInfo->ri_oldTupleSlot,
3518 : newslot,
3519 : context->planSlot);
3520 174 : break;
3521 :
3522 234 : case CMD_DELETE:
3523 234 : rslot = ExecProcessReturning(context,
3524 : resultRelInfo,
3525 : CMD_DELETE,
3526 : resultRelInfo->ri_oldTupleSlot,
3527 : NULL,
3528 : context->planSlot);
3529 234 : break;
3530 :
3531 0 : case CMD_NOTHING:
3532 0 : break;
3533 :
3534 0 : default:
3535 0 : elog(ERROR, "unrecognized commandType: %d",
3536 : (int) commandType);
3537 : }
3538 : }
3539 :
3540 : /*
3541 : * We've activated one of the WHEN clauses, so we don't search
3542 : * further. This is required behaviour, not an optimization.
3543 : */
3544 2392 : break;
3545 : }
3546 :
3547 : /*
3548 : * Successfully executed an action or no qualifying action was found.
3549 : */
3550 11426 : out:
3551 11426 : if (ItemPointerIsValid(&lockedtid))
3552 7490 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3553 : InplaceUpdateTupleLock);
3554 11426 : return rslot;
3555 : }
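 : /*
 :  * Summary of the concurrency outcomes handled above (annotation, not part
 :  * of the original file):
 :  *
 :  *     TM_Ok           - the action was applied; RETURNING is projected if
 :  *                       the query has a RETURNING clause
 :  *     TM_SelfModified - the target row was changed by this command, or by
 :  *                       a command it triggered; always reported as an error
 :  *     TM_Deleted      - "*matched" is set to false, so the caller proceeds
 :  *                       to the NOT MATCHED [BY TARGET] actions
 :  *     TM_Updated      - the latest row version is locked, EvalPlanQual()
 :  *                       rechecks the join condition, the action lists are
 :  *                       switched if needed, and control loops back to
 :  *                       lmerge_matched
 :  */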
3556 :
3557 : /*
3558 : * Execute the first qualifying NOT MATCHED [BY TARGET] action.
3559 : */
3560 : static TupleTableSlot *
3561 2664 : ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3562 : bool canSetTag)
3563 : {
3564 2664 : ModifyTableState *mtstate = context->mtstate;
3565 2664 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3566 : List *actionStates;
3567 2664 : TupleTableSlot *rslot = NULL;
3568 : ListCell *l;
3569 :
3570 : /*
3571 : * For INSERT actions, the root relation's merge action is OK since the
3572 : * INSERT's targetlist and the WHEN conditions can only refer to the
3573 : * source relation and hence it does not matter which result relation we
3574 : * work with.
3575 : *
3576 : * XXX does this mean that we can avoid creating copies of actionStates on
3577 : * partitioned tables, for not-matched actions?
3578 : */
3579 2664 : actionStates = resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET];
3580 :
3581 : /*
3582 : * Make source tuple available to ExecQual and ExecProject. We don't need
3583 : * the target tuple, since the WHEN quals and targetlist can't refer to
3584 : * the target columns.
3585 : */
3586 2664 : econtext->ecxt_scantuple = NULL;
3587 2664 : econtext->ecxt_innertuple = context->planSlot;
3588 2664 : econtext->ecxt_outertuple = NULL;
3589 :
3590 3534 : foreach(l, actionStates)
3591 : {
3592 2664 : MergeActionState *action = (MergeActionState *) lfirst(l);
3593 2664 : CmdType commandType = action->mas_action->commandType;
3594 : TupleTableSlot *newslot;
3595 :
3596 : /*
3597 : * Test condition, if any.
3598 : *
3599 : * In the absence of any condition, we perform the action
3600 : * unconditionally (no need to check separately since ExecQual() will
3601 : * return true if there are no conditions to evaluate).
3602 : */
3603 2664 : if (!ExecQual(action->mas_whenqual, econtext))
3604 870 : continue;
3605 :
3606 : /* Perform stated action */
3607 1794 : switch (commandType)
3608 : {
3609 1794 : case CMD_INSERT:
3610 :
3611 : /*
3612 : * Project the tuple. In case of a partitioned table, the
3613 : * projection was already built to use the root's descriptor,
3614 : * so we don't need to map the tuple here.
3615 : */
3616 1794 : newslot = ExecProject(action->mas_proj);
3617 1794 : mtstate->mt_merge_action = action;
3618 :
3619 1794 : rslot = ExecInsert(context, mtstate->rootResultRelInfo,
3620 : newslot, canSetTag, NULL, NULL);
3621 1740 : mtstate->mt_merge_inserted += 1;
3622 1740 : break;
3623 0 : case CMD_NOTHING:
3624 : /* Do nothing */
3625 0 : break;
3626 0 : default:
3627 0 : elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
3628 : }
3629 :
3630 : /*
3631 : * We've activated one of the WHEN clauses, so we don't search
3632 : * further. This is required behaviour, not an optimization.
3633 : */
3634 1740 : break;
3635 : }
3636 :
3637 2610 : return rslot;
3638 : }
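 : /*
 :  * Annotation (not part of the original file): the INSERT above always
 :  * targets mtstate->rootResultRelInfo, so for a partitioned table the new
 :  * tuple is routed from the root. ExecInitMerge() below builds the INSERT
 :  * projection against the root relation's descriptor for exactly this
 :  * reason.
 :  */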
3639 :
3640 : /*
3641 : * Initialize state for execution of MERGE.
3642 : */
3643 : void
3644 1504 : ExecInitMerge(ModifyTableState *mtstate, EState *estate)
3645 : {
3646 1504 : List *mergeActionLists = mtstate->mt_mergeActionLists;
3647 1504 : List *mergeJoinConditions = mtstate->mt_mergeJoinConditions;
3648 1504 : ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
3649 : ResultRelInfo *resultRelInfo;
3650 : ExprContext *econtext;
3651 : ListCell *lc;
3652 : int i;
3653 :
3654 1504 : if (mergeActionLists == NIL)
3655 0 : return;
3656 :
3657 1504 : mtstate->mt_merge_subcommands = 0;
3658 :
3659 1504 : if (mtstate->ps.ps_ExprContext == NULL)
3660 1254 : ExecAssignExprContext(estate, &mtstate->ps);
3661 1504 : econtext = mtstate->ps.ps_ExprContext;
3662 :
3663 : /*
3664 : * Create a MergeActionState for each action on the mergeActionList and
3665 : * add it to either a list of matched actions or not-matched actions.
3666 : *
3667 : * Similar logic appears in ExecInitPartitionInfo(), so if changing
3668 : * anything here, do so there too.
3669 : */
3670 1504 : i = 0;
3671 3240 : foreach(lc, mergeActionLists)
3672 : {
3673 1736 : List *mergeActionList = lfirst(lc);
3674 : Node *joinCondition;
3675 : TupleDesc relationDesc;
3676 : ListCell *l;
3677 :
3678 1736 : joinCondition = (Node *) list_nth(mergeJoinConditions, i);
3679 1736 : resultRelInfo = mtstate->resultRelInfo + i;
3680 1736 : i++;
3681 1736 : relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3682 :
3683 : /* initialize slots for MERGE fetches from this rel */
3684 1736 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3685 1736 : ExecInitMergeTupleSlots(mtstate, resultRelInfo);
3686 :
3687 : /* initialize state for join condition checking */
3688 1736 : resultRelInfo->ri_MergeJoinCondition =
3689 1736 : ExecInitQual((List *) joinCondition, &mtstate->ps);
3690 :
3691 4796 : foreach(l, mergeActionList)
3692 : {
3693 3060 : MergeAction *action = (MergeAction *) lfirst(l);
3694 : MergeActionState *action_state;
3695 : TupleTableSlot *tgtslot;
3696 : TupleDesc tgtdesc;
3697 :
3698 : /*
3699 : * Build action merge state for this rel. (For partitions,
3700 : * equivalent code exists in ExecInitPartitionInfo.)
3701 : */
3702 3060 : action_state = makeNode(MergeActionState);
3703 3060 : action_state->mas_action = action;
3704 3060 : action_state->mas_whenqual = ExecInitQual((List *) action->qual,
3705 : &mtstate->ps);
3706 :
3707 : /*
3708 : * We create three lists - one for each MergeMatchKind - and stick
3709 : * the MergeActionState into the appropriate list.
3710 : */
3711 6120 : resultRelInfo->ri_MergeActions[action->matchKind] =
3712 3060 : lappend(resultRelInfo->ri_MergeActions[action->matchKind],
3713 : action_state);
3714 :
3715 3060 : switch (action->commandType)
3716 : {
3717 1016 : case CMD_INSERT:
3718 1016 : ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
3719 : action->targetList);
3720 :
3721 : /*
3722 : * If the MERGE targets a partitioned table, any INSERT
3723 : * actions must be routed through it, not the child
3724 : * relations. Initialize the routing struct and the root
3725 : * table's "new" tuple slot for that, if not already done.
3726 : * The projection we prepare, for all relations, uses the
3727 : * root relation descriptor, and targets the plan's root
3728 : * slot. (This is consistent with the fact that we
3729 : * checked the plan output to match the root relation,
3730 : * above.)
3731 : */
3732 1016 : if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
3733 : RELKIND_PARTITIONED_TABLE)
3734 : {
3735 328 : if (mtstate->mt_partition_tuple_routing == NULL)
3736 : {
3737 : /*
3738 : * Initialize planstate for routing if not already
3739 : * done.
3740 : *
3741 : * Note that the slot is managed as a standalone
3742 : * slot belonging to ModifyTableState, so we pass
3743 : * NULL for the 2nd argument.
3744 : */
3745 154 : mtstate->mt_root_tuple_slot =
3746 154 : table_slot_create(rootRelInfo->ri_RelationDesc,
3747 : NULL);
3748 154 : mtstate->mt_partition_tuple_routing =
3749 154 : ExecSetupPartitionTupleRouting(estate,
3750 : rootRelInfo->ri_RelationDesc);
3751 : }
3752 328 : tgtslot = mtstate->mt_root_tuple_slot;
3753 328 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
3754 : }
3755 : else
3756 : {
3757 : /* not partitioned? use the stock relation and slot */
3758 688 : tgtslot = resultRelInfo->ri_newTupleSlot;
3759 688 : tgtdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3760 : }
3761 :
3762 1016 : action_state->mas_proj =
3763 1016 : ExecBuildProjectionInfo(action->targetList, econtext,
3764 : tgtslot,
3765 : &mtstate->ps,
3766 : tgtdesc);
3767 :
3768 1016 : mtstate->mt_merge_subcommands |= MERGE_INSERT;
3769 1016 : break;
3770 1528 : case CMD_UPDATE:
3771 1528 : action_state->mas_proj =
3772 1528 : ExecBuildUpdateProjection(action->targetList,
3773 : true,
3774 : action->updateColnos,
3775 : relationDesc,
3776 : econtext,
3777 : resultRelInfo->ri_newTupleSlot,
3778 : &mtstate->ps);
3779 1528 : mtstate->mt_merge_subcommands |= MERGE_UPDATE;
3780 1528 : break;
3781 452 : case CMD_DELETE:
3782 452 : mtstate->mt_merge_subcommands |= MERGE_DELETE;
3783 452 : break;
3784 64 : case CMD_NOTHING:
3785 64 : break;
3786 0 : default:
3787 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3788 : break;
3789 : }
3790 : }
3791 : }
3792 : }
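 : /*
 :  * Illustration only (not part of the original file): ri_MergeActions is
 :  * indexed by MergeMatchKind, so the per-kind lists built above are later
 :  * retrieved with, e.g.,
 :  *
 :  *     List *matchedActions =
 :  *         resultRelInfo->ri_MergeActions[MERGE_WHEN_MATCHED];
 :  *
 :  * as done at the top of ExecMergeMatched().
 :  */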
3793 :
3794 : /*
3795 : * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
3796 : *
3797 : * We mark 'projectNewInfoValid' even though the projections themselves
3798 : * are not initialized here.
3799 : */
3800 : void
3801 1760 : ExecInitMergeTupleSlots(ModifyTableState *mtstate,
3802 : ResultRelInfo *resultRelInfo)
3803 : {
3804 1760 : EState *estate = mtstate->ps.state;
3805 :
3806 : Assert(!resultRelInfo->ri_projectNewInfoValid);
3807 :
3808 1760 : resultRelInfo->ri_oldTupleSlot =
3809 1760 : table_slot_create(resultRelInfo->ri_RelationDesc,
3810 : &estate->es_tupleTable);
3811 1760 : resultRelInfo->ri_newTupleSlot =
3812 1760 : table_slot_create(resultRelInfo->ri_RelationDesc,
3813 : &estate->es_tupleTable);
3814 1760 : resultRelInfo->ri_projectNewInfoValid = true;
3815 1760 : }
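 : /*
 :  * Annotation: the ri_oldTupleSlot created here is the slot that
 :  * ExecMergeMatched() installs as econtext->ecxt_scantuple and fills with
 :  * the current target tuple; ri_newTupleSlot receives the projected output
 :  * of UPDATE actions.
 :  */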
3816 :
3817 : /*
3818 : * Process BEFORE EACH STATEMENT triggers
3819 : */
3820 : static void
3821 119466 : fireBSTriggers(ModifyTableState *node)
3822 : {
3823 119466 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
3824 119466 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3825 :
3826 119466 : switch (node->operation)
3827 : {
3828 92622 : case CMD_INSERT:
3829 92622 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3830 92610 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3831 834 : ExecBSUpdateTriggers(node->ps.state,
3832 : resultRelInfo);
3833 92610 : break;
3834 13360 : case CMD_UPDATE:
3835 13360 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3836 13360 : break;
3837 12124 : case CMD_DELETE:
3838 12124 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3839 12124 : break;
3840 1360 : case CMD_MERGE:
3841 1360 : if (node->mt_merge_subcommands & MERGE_INSERT)
3842 746 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3843 1360 : if (node->mt_merge_subcommands & MERGE_UPDATE)
3844 936 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3845 1360 : if (node->mt_merge_subcommands & MERGE_DELETE)
3846 368 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3847 1360 : break;
3848 0 : default:
3849 0 : elog(ERROR, "unknown operation");
3850 : break;
3851 : }
3852 119454 : }
3853 :
3854 : /*
3855 : * Process AFTER EACH STATEMENT triggers
3856 : */
3857 : static void
3858 116186 : fireASTriggers(ModifyTableState *node)
3859 : {
3860 116186 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
3861 116186 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3862 :
3863 116186 : switch (node->operation)
3864 : {
3865 90320 : case CMD_INSERT:
3866 90320 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3867 732 : ExecASUpdateTriggers(node->ps.state,
3868 : resultRelInfo,
3869 732 : node->mt_oc_transition_capture);
3870 90320 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
3871 90320 : node->mt_transition_capture);
3872 90320 : break;
3873 12650 : case CMD_UPDATE:
3874 12650 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3875 12650 : node->mt_transition_capture);
3876 12650 : break;
3877 12000 : case CMD_DELETE:
3878 12000 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
3879 12000 : node->mt_transition_capture);
3880 12000 : break;
3881 1216 : case CMD_MERGE:
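 : /*
 :  * Annotation: unlike fireBSTriggers(), which fires MERGE statement
 :  * triggers in INSERT, UPDATE, DELETE order, the AFTER STATEMENT
 :  * triggers here fire in the reverse order: DELETE, UPDATE, INSERT.
 :  */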
3882 1216 : if (node->mt_merge_subcommands & MERGE_DELETE)
3883 332 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
3884 332 : node->mt_transition_capture);
3885 1216 : if (node->mt_merge_subcommands & MERGE_UPDATE)
3886 840 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3887 840 : node->mt_transition_capture);
3888 1216 : if (node->mt_merge_subcommands & MERGE_INSERT)
3889 684 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
3890 684 : node->mt_transition_capture);
3891 1216 : break;
3892 0 : default:
3893 0 : elog(ERROR, "unknown operation");
3894 : break;
3895 : }
3896 116186 : }
3897 :
3898 : /*
3899 : * Set up the state needed for collecting transition tuples for AFTER
3900 : * triggers.
3901 : */
3902 : static void
3903 119808 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
3904 : {
3905 119808 : ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
3906 119808 : ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
3907 :
3908 : /* Check for transition tables on the directly targeted relation. */
3909 119808 : mtstate->mt_transition_capture =
3910 119808 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3911 119808 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3912 : mtstate->operation);
3913 119808 : if (plan->operation == CMD_INSERT &&
3914 92624 : plan->onConflictAction == ONCONFLICT_UPDATE)
3915 834 : mtstate->mt_oc_transition_capture =
3916 834 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3917 834 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3918 : CMD_UPDATE);
3919 119808 : }
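 : /*
 :  * Annotation: a separate mt_oc_transition_capture is needed because
 :  * INSERT ... ON CONFLICT DO UPDATE can fire both INSERT and UPDATE
 :  * transition-table triggers, and the two capture states are built with
 :  * different command types (mtstate->operation vs. CMD_UPDATE above).
 :  */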
3920 :
3921 : /*
3922 : * ExecPrepareTupleRouting --- prepare for routing one tuple
3923 : *
3924 : * Determine the partition in which the tuple in slot is to be inserted,
3925 : * and return its ResultRelInfo in *partRelInfo. The return value is
3926 : * a slot holding the tuple of the partition rowtype.
3927 : *
3928 : * This also sets the transition table information in mtstate based on the
3929 : * selected partition.
3930 : */
3931 : static TupleTableSlot *
3932 728270 : ExecPrepareTupleRouting(ModifyTableState *mtstate,
3933 : EState *estate,
3934 : PartitionTupleRouting *proute,
3935 : ResultRelInfo *targetRelInfo,
3936 : TupleTableSlot *slot,
3937 : ResultRelInfo **partRelInfo)
3938 : {
3939 : ResultRelInfo *partrel;
3940 : TupleConversionMap *map;
3941 :
3942 : /*
3943 : * Lookup the target partition's ResultRelInfo. If ExecFindPartition does
3944 : * not find a valid partition for the tuple in 'slot' then an error is
3945 : * raised. An error may also be raised if the found partition is not a
3946 : * valid target for INSERTs. This is required since a partitioned table
3947 : * UPDATE to another partition becomes a DELETE+INSERT.
3948 : */
3949 728270 : partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
3950 :
3951 : /*
3952 : * If we're capturing transition tuples, we might need to convert from the
3953 : * partition rowtype to root partitioned table's rowtype. But if there
3954 : * are no BEFORE triggers on the partition that could change the tuple, we
3955 : * can just remember the original unconverted tuple to avoid a needless
3956 : * round trip conversion.
3957 : */
3958 728066 : if (mtstate->mt_transition_capture != NULL)
3959 : {
3960 : bool has_before_insert_row_trig;
3961 :
3962 192 : has_before_insert_row_trig = (partrel->ri_TrigDesc &&
3963 42 : partrel->ri_TrigDesc->trig_insert_before_row);
3964 :
3965 150 : mtstate->mt_transition_capture->tcs_original_insert_tuple =
3966 150 : !has_before_insert_row_trig ? slot : NULL;
3967 : }
3968 :
3969 : /*
3970 : * Convert the tuple, if necessary.
3971 : */
3972 728066 : map = ExecGetRootToChildMap(partrel, estate);
3973 728066 : if (map != NULL)
3974 : {
3975 68460 : TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
3976 :
3977 68460 : slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
3978 : }
3979 :
3980 728066 : *partRelInfo = partrel;
3981 728066 : return slot;
3982 : }
3983 :
3984 : /* ----------------------------------------------------------------
3985 : * ExecModifyTable
3986 : *
3987 : * Perform table modifications as required, and return RETURNING results
3988 : * if needed.
3989 : * ----------------------------------------------------------------
3990 : */
3991 : static TupleTableSlot *
3992 128240 : ExecModifyTable(PlanState *pstate)
3993 : {
3994 128240 : ModifyTableState *node = castNode(ModifyTableState, pstate);
3995 : ModifyTableContext context;
3996 128240 : EState *estate = node->ps.state;
3997 128240 : CmdType operation = node->operation;
3998 : ResultRelInfo *resultRelInfo;
3999 : PlanState *subplanstate;
4000 : TupleTableSlot *slot;
4001 : TupleTableSlot *oldSlot;
4002 : ItemPointerData tuple_ctid;
4003 : HeapTupleData oldtupdata;
4004 : HeapTuple oldtuple;
4005 : ItemPointer tupleid;
4006 : bool tuplock;
4007 :
4008 128240 : CHECK_FOR_INTERRUPTS();
4009 :
4010 : /*
4011 : * This should NOT get called during EvalPlanQual; we should have passed a
4012 : * subplan tree to EvalPlanQual, instead. Use a runtime test not just
4013 : * Assert because this condition is easy to miss in testing. (Note:
4014 : * although ModifyTable should not get executed within an EvalPlanQual
4015 : * operation, we do have to allow it to be initialized and shut down in
4016 : * case it is within a CTE subplan. Hence this test must be here, not in
4017 : * ExecInitModifyTable.)
4018 : */
4019 128240 : if (estate->es_epq_active != NULL)
4020 0 : elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
4021 :
4022 : /*
4023 : * If we've already completed processing, don't try to do more. We need
4024 : * this test because ExecPostprocessPlan might call us an extra time, and
4025 : * our subplan's nodes aren't necessarily robust against being called
4026 : * extra times.
4027 : */
4028 128240 : if (node->mt_done)
4029 794 : return NULL;
4030 :
4031 : /*
4032 : * On first call, fire BEFORE STATEMENT triggers before proceeding.
4033 : */
4034 127446 : if (node->fireBSTriggers)
4035 : {
4036 119466 : fireBSTriggers(node);
4037 119454 : node->fireBSTriggers = false;
4038 : }
4039 :
4040 : /* Preload local variables */
4041 127434 : resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
4042 127434 : subplanstate = outerPlanState(node);
4043 :
4044 : /* Set global context */
4045 127434 : context.mtstate = node;
4046 127434 : context.epqstate = &node->mt_epqstate;
4047 127434 : context.estate = estate;
4048 :
4049 : /*
4050 : * Fetch rows from subplan, and execute the required table modification
4051 : * for each row.
4052 : */
4053 : for (;;)
4054 : {
4055 : /*
4056 : * Reset the per-output-tuple exprcontext. This is needed because
4057 : * triggers expect to use that context as workspace. It's a bit ugly
4058 : * to do this below the top level of the plan, however. We might need
4059 : * to rethink this later.
4060 : */
4061 13681460 : ResetPerTupleExprContext(estate);
4062 :
4063 : /*
4064 : * Reset per-tuple memory context used for processing on conflict and
4065 : * returning clauses, to free any expression evaluation storage
4066 : * allocated in the previous cycle.
4067 : */
4068 13681460 : if (pstate->ps_ExprContext)
4069 350072 : ResetExprContext(pstate->ps_ExprContext);
4070 :
4071 : /*
4072 : * If there is a pending MERGE ... WHEN NOT MATCHED [BY TARGET] action
4073 : * to execute, do so now --- see the comments in ExecMerge().
4074 : */
4075 13681460 : if (node->mt_merge_pending_not_matched != NULL)
4076 : {
4077 2 : context.planSlot = node->mt_merge_pending_not_matched;
4078 2 : context.cpDeletedSlot = NULL;
4079 :
4080 2 : slot = ExecMergeNotMatched(&context, node->resultRelInfo,
4081 2 : node->canSetTag);
4082 :
4083 : /* Clear the pending action */
4084 2 : node->mt_merge_pending_not_matched = NULL;
4085 :
4086 : /*
4087 : * If we got a RETURNING result, return it to the caller. We'll
4088 : * continue the work on next call.
4089 : */
4090 2 : if (slot)
4091 2 : return slot;
4092 :
4093 0 : continue; /* continue with the next tuple */
4094 : }
4095 :
4096 : /* Fetch the next row from subplan */
4097 13681458 : context.planSlot = ExecProcNode(subplanstate);
4098 13681048 : context.cpDeletedSlot = NULL;
4099 :
4100 : /* No more tuples to process? */
4101 13681048 : if (TupIsNull(context.planSlot))
4102 : break;
4103 :
4104 : /*
4105 : * When there are multiple result relations, each tuple contains a
4106 : * junk column that gives the OID of the rel from which it came.
4107 : * Extract it and select the correct result relation.
4108 : */
4109 13564862 : if (AttributeNumberIsValid(node->mt_resultOidAttno))
4110 : {
4111 : Datum datum;
4112 : bool isNull;
4113 : Oid resultoid;
4114 :
4115 5112 : datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
4116 : &isNull);
4117 5112 : if (isNull)
4118 : {
4119 : /*
4120 : * For commands other than MERGE, any tuples having InvalidOid
4121 : * for tableoid are errors. For MERGE, we may need to handle
4122 : * them as WHEN NOT MATCHED clauses if any, so do that.
4123 : *
4124 : * Note that we use the node's toplevel resultRelInfo, not any
4125 : * specific partition's.
4126 : */
4127 484 : if (operation == CMD_MERGE)
4128 : {
4129 484 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4130 :
4131 484 : slot = ExecMerge(&context, node->resultRelInfo,
4132 484 : NULL, NULL, node->canSetTag);
4133 :
4134 : /*
4135 : * If we got a RETURNING result, return it to the caller.
4136 : * We'll continue the work on next call.
4137 : */
4138 478 : if (slot)
4139 32 : return slot;
4140 :
4141 446 : continue; /* continue with the next tuple */
4142 : }
4143 :
4144 0 : elog(ERROR, "tableoid is NULL");
4145 : }
4146 4628 : resultoid = DatumGetObjectId(datum);
4147 :
4148 : /* If it's not the same as last time, we need to locate the rel */
4149 4628 : if (resultoid != node->mt_lastResultOid)
4150 3154 : resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
4151 : false, true);
4152 : }
4153 :
4154 : /*
4155 : * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
4156 : * here is compute the RETURNING expressions.
4157 : */
4158 13564378 : if (resultRelInfo->ri_usesFdwDirectModify)
4159 : {
4160 : Assert(resultRelInfo->ri_projectReturning);
4161 :
4162 : /*
4163 : * A scan slot containing the data that was actually inserted,
4164 : * updated or deleted has already been made available to
4165 : * ExecProcessReturning by IterateDirectModify, so no need to
4166 : * provide it here. The individual old and new slots are not
4167 : * needed, since direct-modify is disabled if the RETURNING list
4168 : * refers to OLD/NEW values.
4169 : */
4170 : Assert((resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD) == 0 &&
4171 : (resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW) == 0);
4172 :
4173 694 : slot = ExecProcessReturning(&context, resultRelInfo, operation,
4174 : NULL, NULL, context.planSlot);
4175 :
4176 694 : return slot;
4177 : }
4178 :
4179 13563684 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4180 13563684 : slot = context.planSlot;
4181 :
4182 13563684 : tupleid = NULL;
4183 13563684 : oldtuple = NULL;
4184 :
4185 : /*
4186 : * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
4187 : * to be updated/deleted/merged. For a heap relation, that's a TID;
4188 : * otherwise we may have a wholerow junk attr that carries the old
4189 : * tuple in toto. Keep this in step with the part of
4190 : * ExecInitModifyTable that sets up ri_RowIdAttNo.
4191 : */
4192 13563684 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4193 : operation == CMD_MERGE)
4194 : {
4195 : char relkind;
4196 : Datum datum;
4197 : bool isNull;
4198 :
4199 1974794 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4200 1974794 : if (relkind == RELKIND_RELATION ||
4201 562 : relkind == RELKIND_MATVIEW ||
4202 : relkind == RELKIND_PARTITIONED_TABLE)
4203 : {
4204 : /* ri_RowIdAttNo refers to a ctid attribute */
4205 : Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo));
4206 1974238 : datum = ExecGetJunkAttribute(slot,
4207 1974238 : resultRelInfo->ri_RowIdAttNo,
4208 : &isNull);
4209 :
4210 : /*
4211 : * For commands other than MERGE, any tuples having a null row
4212 : * identifier are errors. For MERGE, we may need to handle
4213 : * them as WHEN NOT MATCHED clauses if any, so do that.
4214 : *
4215 : * Note that we use the node's toplevel resultRelInfo, not any
4216 : * specific partition's.
4217 : */
4218 1974238 : if (isNull)
4219 : {
4220 2116 : if (operation == CMD_MERGE)
4221 : {
4222 2116 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4223 :
4224 2116 : slot = ExecMerge(&context, node->resultRelInfo,
4225 2116 : NULL, NULL, node->canSetTag);
4226 :
4227 : /*
4228 : * If we got a RETURNING result, return it to the
4229 : * caller. We'll continue the work on next call.
4230 : */
4231 2074 : if (slot)
4232 120 : return slot;
4233 :
4234 1996 : continue; /* continue with the next tuple */
4235 : }
4236 :
4237 0 : elog(ERROR, "ctid is NULL");
4238 : }
4239 :
4240 1972122 : tupleid = (ItemPointer) DatumGetPointer(datum);
4241 1972122 : tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
4242 1972122 : tupleid = &tuple_ctid;
4243 : }
4244 :
4245 : /*
4246 : * Use the wholerow attribute, when available, to reconstruct the
4247 : * old relation tuple. The old tuple serves one or both of two
4248 : * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
4249 : * provides values for any unchanged columns for the NEW tuple of
4250 : * an UPDATE, because the subplan does not produce all the columns
4251 : * of the target table.
4252 : *
4253 : * Note that the wholerow attribute does not carry system columns,
4254 : * so foreign table triggers miss seeing those, except that we
4255 : * know enough here to set t_tableOid. Quite separately from
4256 : * this, the FDW may fetch its own junk attrs to identify the row.
4257 : *
4258 : * Other relevant relkinds, currently limited to views, always
4259 : * have a wholerow attribute.
4260 : */
4261 556 : else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4262 : {
4263 530 : datum = ExecGetJunkAttribute(slot,
4264 530 : resultRelInfo->ri_RowIdAttNo,
4265 : &isNull);
4266 :
4267 : /*
4268 : * For commands other than MERGE, any tuples having a null row
4269 : * identifier are errors. For MERGE, we may need to handle
4270 : * them as WHEN NOT MATCHED clauses if any, so do that.
4271 : *
4272 : * Note that we use the node's toplevel resultRelInfo, not any
4273 : * specific partition's.
4274 : */
4275 530 : if (isNull)
4276 : {
4277 48 : if (operation == CMD_MERGE)
4278 : {
4279 48 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4280 :
4281 48 : slot = ExecMerge(&context, node->resultRelInfo,
4282 48 : NULL, NULL, node->canSetTag);
4283 :
4284 : /*
4285 : * If we got a RETURNING result, return it to the
4286 : * caller. We'll continue the work on next call.
4287 : */
4288 42 : if (slot)
4289 12 : return slot;
4290 :
4291 30 : continue; /* continue with the next tuple */
4292 : }
4293 :
4294 0 : elog(ERROR, "wholerow is NULL");
4295 : }
4296 :
4297 482 : oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
4298 482 : oldtupdata.t_len =
4299 482 : HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
4300 482 : ItemPointerSetInvalid(&(oldtupdata.t_self));
4301 : /* Historically, view triggers see invalid t_tableOid. */
4302 482 : oldtupdata.t_tableOid =
4303 482 : (relkind == RELKIND_VIEW) ? InvalidOid :
4304 206 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
4305 :
4306 482 : oldtuple = &oldtupdata;
4307 : }
4308 : else
4309 : {
4310 : /* Only foreign tables are allowed to omit a row-ID attr */
4311 : Assert(relkind == RELKIND_FOREIGN_TABLE);
4312 : }
4313 : }
4314 :
4315 13561520 : switch (operation)
4316 : {
4317 11588890 : case CMD_INSERT:
4318 : /* Initialize projection info if first time for this table */
4319 11588890 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4320 91488 : ExecInitInsertProjection(node, resultRelInfo);
4321 11588890 : slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
4322 11588890 : slot = ExecInsert(&context, resultRelInfo, slot,
4323 11588890 : node->canSetTag, NULL, NULL);
4324 11586780 : break;
4325 :
4326 316070 : case CMD_UPDATE:
4327 316070 : tuplock = false;
4328 :
4329 : /* Initialize projection info if first time for this table */
4330 316070 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4331 13078 : ExecInitUpdateProjection(node, resultRelInfo);
4332 :
4333 : /*
4334 : * Make the new tuple by combining plan's output tuple with
4335 : * the old tuple being updated.
4336 : */
4337 316070 : oldSlot = resultRelInfo->ri_oldTupleSlot;
4338 316070 : if (oldtuple != NULL)
4339 : {
4340 : Assert(!resultRelInfo->ri_needLockTagTuple);
4341 : /* Use the wholerow junk attr as the old tuple. */
4342 314 : ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
4343 : }
4344 : else
4345 : {
4346 : /* Fetch the most recent version of old tuple. */
4347 315756 : Relation relation = resultRelInfo->ri_RelationDesc;
4348 :
4349 315756 : if (resultRelInfo->ri_needLockTagTuple)
4350 : {
4351 23696 : LockTuple(relation, tupleid, InplaceUpdateTupleLock);
4352 23696 : tuplock = true;
4353 : }
4354 315756 : if (!table_tuple_fetch_row_version(relation, tupleid,
4355 : SnapshotAny,
4356 : oldSlot))
4357 0 : elog(ERROR, "failed to fetch tuple being updated");
4358 : }
4359 316070 : slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
4360 : oldSlot);
4361 :
4362 : /* Now apply the update. */
4363 316070 : slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
4364 316070 : oldSlot, slot, node->canSetTag);
4365 315560 : if (tuplock)
4366 23696 : UnlockTuple(resultRelInfo->ri_RelationDesc, tupleid,
4367 : InplaceUpdateTupleLock);
4368 315560 : break;
4369 :
4370 1644516 : case CMD_DELETE:
4371 1644516 : slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
4372 1644516 : true, false, node->canSetTag, NULL, NULL, NULL);
4373 1644434 : break;
4374 :
4375 12044 : case CMD_MERGE:
4376 12044 : slot = ExecMerge(&context, resultRelInfo, tupleid, oldtuple,
4377 12044 : node->canSetTag);
4378 11954 : break;
4379 :
4380 0 : default:
4381 0 : elog(ERROR, "unknown operation");
4382 : break;
4383 : }
4384 :
4385 : /*
4386 : * If we got a RETURNING result, return it to caller. We'll continue
4387 : * the work on next call.
4388 : */
4389 13558728 : if (slot)
4390 7144 : return slot;
4391 : }
4392 :
4393 : /*
4394 : * Insert remaining tuples for batch insert.
4395 : */
4396 116186 : if (estate->es_insert_pending_result_relations != NIL)
4397 24 : ExecPendingInserts(estate);
4398 :
4399 : /*
4400 : * We're done, but fire AFTER STATEMENT triggers before exiting.
4401 : */
4402 116186 : fireASTriggers(node);
4403 :
4404 116186 : node->mt_done = true;
4405 :
4406 116186 : return NULL;
4407 : }
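 : /*
 :  * Minimal sketch (illustration only, not part of the original file) of
 :  * the per-tuple loop above:
 :  *
 :  *     for (;;)
 :  *     {
 :  *         context.planSlot = ExecProcNode(subplanstate);
 :  *         if (TupIsNull(context.planSlot))
 :  *             break;
 :  *         ... select result relation, fetch row-identity junk attrs ...
 :  *         slot = ExecInsert/ExecUpdate/ExecDelete/ExecMerge(...);
 :  *         if (slot)
 :  *             return slot;    (RETURNING row; resume on next call)
 :  *     }
 :  *     fireASTriggers(node);
 :  *     node->mt_done = true;
 :  *     return NULL;
 :  */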
4408 :
4409 : /*
4410 : * ExecLookupResultRelByOid
4411 : * If the table with given OID is among the result relations to be
4412 : * updated by the given ModifyTable node, return its ResultRelInfo.
4413 : *
4414 : * If not found, return NULL if missing_ok, else raise error.
4415 : *
4416 : * If update_cache is true, then upon successful lookup, update the node's
4417 : * one-element cache. ONLY ExecModifyTable may pass true for this.
4418 : */
4419 : ResultRelInfo *
4420 11888 : ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid,
4421 : bool missing_ok, bool update_cache)
4422 : {
4423 11888 : if (node->mt_resultOidHash)
4424 : {
4425 : /* Use the pre-built hash table to locate the rel */
4426 : MTTargetRelLookup *mtlookup;
4427 :
4428 : mtlookup = (MTTargetRelLookup *)
4429 0 : hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
4430 0 : if (mtlookup)
4431 : {
4432 0 : if (update_cache)
4433 : {
4434 0 : node->mt_lastResultOid = resultoid;
4435 0 : node->mt_lastResultIndex = mtlookup->relationIndex;
4436 : }
4437 0 : return node->resultRelInfo + mtlookup->relationIndex;
4438 : }
4439 : }
4440 : else
4441 : {
4442 : /* With few target rels, just search the ResultRelInfo array */
4443 22666 : for (int ndx = 0; ndx < node->mt_nrels; ndx++)
4444 : {
4445 14432 : ResultRelInfo *rInfo = node->resultRelInfo + ndx;
4446 :
4447 14432 : if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
4448 : {
4449 3654 : if (update_cache)
4450 : {
4451 3154 : node->mt_lastResultOid = resultoid;
4452 3154 : node->mt_lastResultIndex = ndx;
4453 : }
4454 3654 : return rInfo;
4455 : }
4456 : }
4457 : }
4458 :
4459 8234 : if (!missing_ok)
4460 0 : elog(ERROR, "incorrect result relation OID %u", resultoid);
4461 8234 : return NULL;
4462 : }
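 : /*
 :  * Example usage (annotation): this is how ExecModifyTable() re-resolves
 :  * the result relation when the tableoid junk attribute changes between
 :  * tuples:
 :  *
 :  *     if (resultoid != node->mt_lastResultOid)
 :  *         resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
 :  *                                                  false, true);
 :  */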
4463 :
4464 : /* ----------------------------------------------------------------
4465 : * ExecInitModifyTable
4466 : * ----------------------------------------------------------------
4467 : */
4468 : ModifyTableState *
4469 120828 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
4470 : {
4471 : ModifyTableState *mtstate;
4472 120828 : Plan *subplan = outerPlan(node);
4473 120828 : CmdType operation = node->operation;
4474 120828 : int total_nrels = list_length(node->resultRelations);
4475 : int nrels;
4476 120828 : List *resultRelations = NIL;
4477 120828 : List *withCheckOptionLists = NIL;
4478 120828 : List *returningLists = NIL;
4479 120828 : List *updateColnosLists = NIL;
4480 120828 : List *mergeActionLists = NIL;
4481 120828 : List *mergeJoinConditions = NIL;
4482 : ResultRelInfo *resultRelInfo;
4483 : List *arowmarks;
4484 : ListCell *l;
4485 : int i;
4486 : Relation rel;
4487 :
4488 : /* check for unsupported flags */
4489 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
4490 :
4491 : /*
4492 : * Only consider unpruned relations for initializing their ResultRelInfo
4493 : * struct and other fields such as withCheckOptions, etc.
4494 : *
4495 : * Note: We must avoid pruning every result relation. This is important
4496 : * for MERGE, since even if every result relation is pruned from the
4497 : * subplan, there might still be NOT MATCHED rows, for which there may be
4498 : * INSERT actions to perform. To allow these actions to be found, at
4499 : * least one result relation must be kept. Also, when inserting into a
4500 : * partitioned table, ExecInitPartitionInfo() needs a ResultRelInfo struct
4501 : * as a reference for building the ResultRelInfo of the target partition.
4502 : * In either case, it doesn't matter which result relation is kept, so we
4503 : * just keep the first one, if all others have been pruned. See also,
4504 : * ExecDoInitialPruning(), which ensures that this first result relation
4505 : * has been locked.
4506 : */
4507 120828 : i = 0;
4508 244138 : foreach(l, node->resultRelations)
4509 : {
4510 123310 : Index rti = lfirst_int(l);
4511 : bool keep_rel;
4512 :
4513 123310 : keep_rel = bms_is_member(rti, estate->es_unpruned_relids);
4514 123310 : if (!keep_rel && i == total_nrels - 1 && resultRelations == NIL)
4515 : {
4516 : /* all result relations pruned; keep the first one */
4517 48 : keep_rel = true;
4518 48 : rti = linitial_int(node->resultRelations);
4519 48 : i = 0;
4520 : }
4521 :
4522 123310 : if (keep_rel)
4523 : {
4524 123226 : resultRelations = lappend_int(resultRelations, rti);
4525 123226 : if (node->withCheckOptionLists)
4526 : {
4527 1412 : List *withCheckOptions = list_nth_node(List,
4528 : node->withCheckOptionLists,
4529 : i);
4530 :
4531 1412 : withCheckOptionLists = lappend(withCheckOptionLists, withCheckOptions);
4532 : }
4533 123226 : if (node->returningLists)
4534 : {
4535 4974 : List *returningList = list_nth_node(List,
4536 : node->returningLists,
4537 : i);
4538 :
4539 4974 : returningLists = lappend(returningLists, returningList);
4540 : }
4541 123226 : if (node->updateColnosLists)
4542 : {
4543 15762 : List *updateColnosList = list_nth(node->updateColnosLists, i);
4544 :
4545 15762 : updateColnosLists = lappend(updateColnosLists, updateColnosList);
4546 : }
4547 123226 : if (node->mergeActionLists)
4548 : {
4549 1736 : List *mergeActionList = list_nth(node->mergeActionLists, i);
4550 :
4551 1736 : mergeActionLists = lappend(mergeActionLists, mergeActionList);
4552 : }
4553 123226 : if (node->mergeJoinConditions)
4554 : {
4555 1736 : List *mergeJoinCondition = list_nth(node->mergeJoinConditions, i);
4556 :
4557 1736 : mergeJoinConditions = lappend(mergeJoinConditions, mergeJoinCondition);
4558 : }
4559 : }
4560 123310 : i++;
4561 : }
4562 120828 : nrels = list_length(resultRelations);
4563 : Assert(nrels > 0);
4564 :
4565 : /*
4566 : * create state structure
4567 : */
4568 120828 : mtstate = makeNode(ModifyTableState);
4569 120828 : mtstate->ps.plan = (Plan *) node;
4570 120828 : mtstate->ps.state = estate;
4571 120828 : mtstate->ps.ExecProcNode = ExecModifyTable;
4572 :
4573 120828 : mtstate->operation = operation;
4574 120828 : mtstate->canSetTag = node->canSetTag;
4575 120828 : mtstate->mt_done = false;
4576 :
4577 120828 : mtstate->mt_nrels = nrels;
4578 120828 : mtstate->resultRelInfo = (ResultRelInfo *)
4579 120828 : palloc(nrels * sizeof(ResultRelInfo));
4580 :
4581 120828 : mtstate->mt_merge_pending_not_matched = NULL;
4582 120828 : mtstate->mt_merge_inserted = 0;
4583 120828 : mtstate->mt_merge_updated = 0;
4584 120828 : mtstate->mt_merge_deleted = 0;
4585 120828 : mtstate->mt_updateColnosLists = updateColnosLists;
4586 120828 : mtstate->mt_mergeActionLists = mergeActionLists;
4587 120828 : mtstate->mt_mergeJoinConditions = mergeJoinConditions;
4588 :
4589 : /*----------
4590 : * Resolve the target relation. This is the same as:
4591 : *
4592 : * - the relation for which we will fire FOR STATEMENT triggers,
4593 : * - the relation into whose tuple format all captured transition tuples
4594 : * must be converted, and
4595 : * - the root partitioned table used for tuple routing.
4596 : *
4597 : * If it's a partitioned or inherited table, the root partition or
4598 : * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
4599 : * given explicitly in node->rootRelation. Otherwise, the target relation
4600 : * is the sole relation in the node->resultRelations list and, since it can
4601 : * never be pruned, also in the resultRelations list constructed above.
4602 : *----------
4603 : */
4604 120828 : if (node->rootRelation > 0)
4605 : {
4606 : Assert(bms_is_member(node->rootRelation, estate->es_unpruned_relids));
4607 2798 : mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
4608 2798 : ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
4609 : node->rootRelation);
4610 : }
4611 : else
4612 : {
4613 : Assert(list_length(node->resultRelations) == 1);
4614 : Assert(list_length(resultRelations) == 1);
4615 118030 : mtstate->rootResultRelInfo = mtstate->resultRelInfo;
4616 118030 : ExecInitResultRelation(estate, mtstate->resultRelInfo,
4617 118030 : linitial_int(resultRelations));
4618 : }
4619 :
4620 : /* set up epqstate with dummy subplan data for the moment */
4621 120828 : EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
4622 : node->epqParam, resultRelations);
4623 120828 : mtstate->fireBSTriggers = true;
4624 :
4625 : /*
4626 : * Build state for collecting transition tuples. This requires having a
4627 : * valid trigger query context, so skip it in explain-only mode.
4628 : */
4629 120828 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
4630 119808 : ExecSetupTransitionCaptureState(mtstate, estate);
4631 :
4632 : /*
4633 : * Open all the result relations and initialize the ResultRelInfo structs.
4634 : * (But the root relation was initialized above, if it's part of the array.)
4635 : * We must do this before initializing the subplan, because direct-modify
4636 : * FDWs expect their ResultRelInfos to be available.
4637 : */
4638 120828 : resultRelInfo = mtstate->resultRelInfo;
4639 120828 : i = 0;
4640 243730 : foreach(l, resultRelations)
4641 : {
4642 123220 : Index resultRelation = lfirst_int(l);
4643 123220 : List *mergeActions = NIL;
4644 :
4645 123220 : if (mergeActionLists)
4646 1736 : mergeActions = list_nth(mergeActionLists, i);
4647 :
4648 123220 : if (resultRelInfo != mtstate->rootResultRelInfo)
4649 : {
4650 5190 : ExecInitResultRelation(estate, resultRelInfo, resultRelation);
4651 :
4652 : /*
4653 : * For child result relations, store the root result relation
4654 : * pointer. We do so for the convenience of places that want to
4655 : * look at the query's original target relation but don't have the
4656 : * mtstate handy.
4657 : */
4658 5190 : resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
4659 : }
4660 :
4661 : /* Initialize the usesFdwDirectModify flag */
4662 123220 : resultRelInfo->ri_usesFdwDirectModify =
4663 123220 : bms_is_member(i, node->fdwDirectModifyPlans);
4664 :
4665 : /*
4666 : * Verify result relation is a valid target for the current operation
4667 : */
4668 123220 : CheckValidResultRel(resultRelInfo, operation, mergeActions);
4669 :
4670 122902 : resultRelInfo++;
4671 122902 : i++;
4672 : }
4673 :
4674 : /*
4675 : * Now we may initialize the subplan.
4676 : */
4677 120510 : outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
4678 :
4679 : /*
4680 : * Do additional per-result-relation initialization.
4681 : */
4682 243378 : for (i = 0; i < nrels; i++)
4683 : {
4684 122868 : resultRelInfo = &mtstate->resultRelInfo[i];
4685 :
4686 : /* Let FDWs init themselves for foreign-table result rels */
4687 122868 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4688 122660 : resultRelInfo->ri_FdwRoutine != NULL &&
4689 322 : resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
4690 : {
4691 322 : List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
4692 :
4693 322 : resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
4694 : resultRelInfo,
4695 : fdw_private,
4696 : i,
4697 : eflags);
4698 : }
4699 :
4700 : /*
4701 : * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
4702 : * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
4703 : * tables, the FDW might have created additional junk attr(s), but
4704 : * those are no concern of ours.
4705 : */
4706 122868 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4707 : operation == CMD_MERGE)
4708 : {
4709 : char relkind;
4710 :
4711 29972 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4712 29972 : if (relkind == RELKIND_RELATION ||
4713 680 : relkind == RELKIND_MATVIEW ||
4714 : relkind == RELKIND_PARTITIONED_TABLE)
4715 : {
4716 29328 : resultRelInfo->ri_RowIdAttNo =
4717 29328 : ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
4718 29328 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4719 0 : elog(ERROR, "could not find junk ctid column");
4720 : }
4721 644 : else if (relkind == RELKIND_FOREIGN_TABLE)
4722 : {
4723 : /*
4724 : * We don't support MERGE with foreign tables for now. (It's
4725 : * problematic because the implementation uses CTID.)
4726 : */
4727 : Assert(operation != CMD_MERGE);
4728 :
4729 : /*
4730 : * When there is a row-level trigger, there should be a
4731 : * wholerow attribute. We also require it to be present in
4732 : * UPDATE and MERGE, so we can get the values of unchanged
4733 : * columns.
4734 : */
4735 356 : resultRelInfo->ri_RowIdAttNo =
4736 356 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4737 : "wholerow");
4738 356 : if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
4739 202 : !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4740 0 : elog(ERROR, "could not find junk wholerow column");
4741 : }
4742 : else
4743 : {
4744 : /* Other valid target relkinds must provide wholerow */
4745 288 : resultRelInfo->ri_RowIdAttNo =
4746 288 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4747 : "wholerow");
4748 288 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4749 0 : elog(ERROR, "could not find junk wholerow column");
4750 : }
4751 : }
4752 : }
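/*
 * Sketch of how ri_RowIdAttNo is consumed later for an ordinary table
 * (simplified from the per-tuple code elsewhere in this file; variable
 * names are illustrative): the "ctid" junk column is pulled out of the
 * subplan's output tuple to locate the row being updated or deleted.
 *
 *		Datum		datum;
 *		bool		isNull;
 *		ItemPointer tupleid;
 *
 *		datum = ExecGetJunkAttribute(planSlot,
 *									 resultRelInfo->ri_RowIdAttNo,
 *									 &isNull);
 *		if (isNull)
 *			elog(ERROR, "ctid is NULL");
 *		tupleid = (ItemPointer) DatumGetPointer(datum);
 *
 * For foreign tables and views, the "wholerow" datum found above plays
 * the analogous row-locating role.
 */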
4753 :
4754 : /*
4755 : * If this is an inherited update/delete/merge, there will be a junk
4756 : * attribute named "tableoid" present in the subplan's targetlist. It
4757 : * will be used to identify the result relation for a given tuple to be
4758 : * updated/deleted/merged.
4759 : */
4760 120510 : mtstate->mt_resultOidAttno =
4761 120510 : ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
4762 : Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || total_nrels == 1);
4763 120510 : mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
4764 120510 : mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
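/*
 * Sketch of how these three fields cooperate per tuple (simplified; the
 * real logic is in ExecModifyTable() and the OID-to-ResultRelInfo lookup
 * in this file; variable names are illustrative): fetch the "tableoid"
 * junk column, then reuse the cached index if the OID matches the
 * previous tuple's.
 *
 *		Oid			resultoid;
 *		bool		isNull;
 *
 *		resultoid = DatumGetObjectId(ExecGetJunkAttribute(planSlot,
 *											node->mt_resultOidAttno,
 *											&isNull));
 *		if (resultoid == node->mt_lastResultOid)
 *			resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
 *		else
 *			... look it up and refresh the cache (see the hash-vs-linear
 *			sketch below, after mt_resultOidHash is set up) ...
 */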
4765 :
4766 : /* Get the root target relation */
4767 120510 : rel = mtstate->rootResultRelInfo->ri_RelationDesc;
4768 :
4769 : /*
4770 : * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
4771 : * or MERGE might need this too, but only if it actually moves tuples
4772 : * between partitions; in that case setup is done by
4773 : * ExecCrossPartitionUpdate.
4774 : */
4775 120510 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
4776 : operation == CMD_INSERT)
4777 5866 : mtstate->mt_partition_tuple_routing =
4778 5866 : ExecSetupPartitionTupleRouting(estate, rel);
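/*
 * Illustrative examples of when routing state is built (not from this
 * file), for a range-partitioned table t:
 *
 *		INSERT INTO t VALUES (...);		-- routing state built here, eagerly
 *		UPDATE t SET key = key + 1;		-- built lazily, and only if some
 *										-- row actually changes partition
 */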
4779 :
4780 : /*
4781 : * Initialize any WITH CHECK OPTION constraints if needed.
4782 : */
4783 120510 : resultRelInfo = mtstate->resultRelInfo;
4784 121922 : foreach(l, withCheckOptionLists)
4785 : {
4786 1412 : List *wcoList = (List *) lfirst(l);
4787 1412 : List *wcoExprs = NIL;
4788 : ListCell *ll;
4789 :
4790 3824 : foreach(ll, wcoList)
4791 : {
4792 2412 : WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
4793 2412 : ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
4794 : &mtstate->ps);
4795 :
4796 2412 : wcoExprs = lappend(wcoExprs, wcoExpr);
4797 : }
4798 :
4799 1412 : resultRelInfo->ri_WithCheckOptions = wcoList;
4800 1412 : resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
4801 1412 : resultRelInfo++;
4802 : }
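/*
 * Illustrative origin of these quals (hypothetical objects, not from
 * this file): for an auto-updatable view such as
 *
 *		CREATE VIEW positive_t AS
 *			SELECT * FROM t WHERE x > 0
 *			WITH CHECK OPTION;
 *
 * the rewriter attaches "x > 0" as a WithCheckOption, and the loop above
 * compiles one such qual list per kept result relation.
 */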
4803 :
4804 : /*
4805 : * Initialize RETURNING projections if needed.
4806 : */
4807 120510 : if (returningLists)
4808 : {
4809 : TupleTableSlot *slot;
4810 : ExprContext *econtext;
4811 :
4812 : /*
4813 : * Initialize result tuple slot and assign its rowtype using the first
4814 : * RETURNING list. We assume the rest will look the same.
4815 : */
4816 4628 : mtstate->ps.plan->targetlist = (List *) linitial(returningLists);
4817 :
4818 : /* Set up a slot for the output of the RETURNING projection(s) */
4819 4628 : ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
4820 4628 : slot = mtstate->ps.ps_ResultTupleSlot;
4821 :
4822 : /* Need an econtext too */
4823 4628 : if (mtstate->ps.ps_ExprContext == NULL)
4824 4628 : ExecAssignExprContext(estate, &mtstate->ps);
4825 4628 : econtext = mtstate->ps.ps_ExprContext;
4826 :
4827 : /*
4828 : * Build a projection for each result rel.
4829 : */
4830 4628 : resultRelInfo = mtstate->resultRelInfo;
4831 9602 : foreach(l, returningLists)
4832 : {
4833 4974 : List *rlist = (List *) lfirst(l);
4834 :
4835 4974 : resultRelInfo->ri_returningList = rlist;
4836 4974 : resultRelInfo->ri_projectReturning =
4837 4974 : ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
4838 4974 : resultRelInfo->ri_RelationDesc->rd_att);
4839 4974 : resultRelInfo++;
4840 : }
4841 : }
4842 : else
4843 : {
4844 : /*
4845 : * We still must construct a dummy result tuple type, because InitPlan
4846 : * expects one (maybe should change that?).
4847 : */
4848 115882 : mtstate->ps.plan->targetlist = NIL;
4849 115882 : ExecInitResultTypeTL(&mtstate->ps);
4850 :
4851 115882 : mtstate->ps.ps_ExprContext = NULL;
4852 : }
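/*
 * Example of why RETURNING projections are per result relation
 * (illustrative): in an inherited
 *
 *		UPDATE parent SET ... RETURNING *;
 *
 * each unpruned child may have different attribute numbers for the same
 * columns, so each gets its own rlist and projection, all producing
 * tuples in the common result slot created above.
 */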
4853 :
4854 : /* Set the list of arbiter indexes if needed for ON CONFLICT */
4855 120510 : resultRelInfo = mtstate->resultRelInfo;
4856 120510 : if (node->onConflictAction != ONCONFLICT_NONE)
4857 : {
4858 : /* INSERT can have only one result relation; inheritance is not expanded */
4859 : Assert(total_nrels == 1);
4860 1356 : resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
4861 : }
4862 :
4863 : /*
4864 : * If needed, initialize the target list, projection, and qual for ON
4865 : * CONFLICT DO UPDATE.
4866 : */
4867 120510 : if (node->onConflictAction == ONCONFLICT_UPDATE)
4868 : {
4869 912 : OnConflictSetState *onconfl = makeNode(OnConflictSetState);
4870 : ExprContext *econtext;
4871 : TupleDesc relationDesc;
4872 :
4873 : /* already exists if created by RETURNING processing above */
4874 912 : if (mtstate->ps.ps_ExprContext == NULL)
4875 632 : ExecAssignExprContext(estate, &mtstate->ps);
4876 :
4877 912 : econtext = mtstate->ps.ps_ExprContext;
4878 912 : relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
4879 :
4880 : /* create state for DO UPDATE SET operation */
4881 912 : resultRelInfo->ri_onConflict = onconfl;
4882 :
4883 : /* initialize slot for the existing tuple */
4884 912 : onconfl->oc_Existing =
4885 912 : table_slot_create(resultRelInfo->ri_RelationDesc,
4886 912 : &mtstate->ps.state->es_tupleTable);
4887 :
4888 : /*
4889 : * Create the tuple slot for the UPDATE SET projection. We want a slot
4890 : * of the table's type here, because the slot will be used to insert
4891 : * into the table, and for RETURNING processing, which may access
4892 : * system attributes.
4893 : */
4894 912 : onconfl->oc_ProjSlot =
4895 912 : table_slot_create(resultRelInfo->ri_RelationDesc,
4896 912 : &mtstate->ps.state->es_tupleTable);
4897 :
4898 : /* build UPDATE SET projection state */
4899 912 : onconfl->oc_ProjInfo =
4900 912 : ExecBuildUpdateProjection(node->onConflictSet,
4901 : true,
4902 : node->onConflictCols,
4903 : relationDesc,
4904 : econtext,
4905 : onconfl->oc_ProjSlot,
4906 : &mtstate->ps);
4907 :
4908 : /* initialize state to evaluate the WHERE clause, if any */
4909 912 : if (node->onConflictWhere)
4910 : {
4911 : ExprState *qualexpr;
4912 :
4913 176 : qualexpr = ExecInitQual((List *) node->onConflictWhere,
4914 : &mtstate->ps);
4915 176 : onconfl->oc_WhereClause = qualexpr;
4916 : }
4917 : }
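/*
 * Illustrative mapping from SQL to the state just built (hypothetical
 * table and index, not from this file): for
 *
 *		INSERT INTO t AS x VALUES (...)
 *			ON CONFLICT (k) DO UPDATE SET v = excluded.v
 *			WHERE x.v IS DISTINCT FROM excluded.v;
 *
 * the arbiter index list selects the unique index on (k), the SET list
 * becomes oc_ProjInfo projecting into oc_ProjSlot, the WHERE clause
 * becomes oc_WhereClause, and oc_Existing receives the already-present
 * conflicting tuple while those expressions are evaluated.
 */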
4918 :
4919 : /*
4920 : * If we have any secondary relations in an UPDATE or DELETE, they need to
4921 : * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
4922 : * EvalPlanQual mechanism needs to be told about them. This also goes for
4923 : * the source relations in a MERGE. Locate the relevant ExecRowMarks.
4924 : */
4925 120510 : arowmarks = NIL;
4926 123320 : foreach(l, node->rowMarks)
4927 : {
4928 2810 : PlanRowMark *rc = lfirst_node(PlanRowMark, l);
4929 : ExecRowMark *erm;
4930 : ExecAuxRowMark *aerm;
4931 :
4932 : /*
4933 : * Ignore "parent" rowmarks, because they are irrelevant at runtime.
4934 : * Also ignore the rowmarks belonging to child tables that have been
4935 : * pruned in ExecDoInitialPruning().
4936 : */
4937 2810 : if (rc->isParent ||
4938 2668 : !bms_is_member(rc->rti, estate->es_unpruned_relids))
4939 596 : continue;
4940 :
4941 : /* Find ExecRowMark and build ExecAuxRowMark */
4942 2214 : erm = ExecFindRowMark(estate, rc->rti, false);
4943 2214 : aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
4944 2214 : arowmarks = lappend(arowmarks, aerm);
4945 : }
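/*
 * Example of why source relations need rowmarks (illustrative): in
 *
 *		UPDATE t SET v = s.v FROM s WHERE t.k = s.k;
 *
 * if the target row was concurrently updated, EvalPlanQual re-runs the
 * plan for the new row version, and the ExecAuxRowMark built above lets
 * it refetch the matching "s" row using the junk columns recorded in
 * the plan's output.
 */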
4946 :
4947 : /* For a MERGE command, initialize its state */
4948 120510 : if (mtstate->operation == CMD_MERGE)
4949 1504 : ExecInitMerge(mtstate, estate);
4950 :
4951 120510 : EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
4952 :
4953 : /*
4954 : * If there are a lot of result relations, use a hash table to speed the
4955 : * lookups. If there are not a lot, a simple linear search is faster.
4956 : *
4957 : * It's not clear where the threshold is, but try 64 for starters. In a
4958 : * debugging build, use a small threshold so that we get some test
4959 : * coverage of both code paths.
4960 : */
4961 : #ifdef USE_ASSERT_CHECKING
4962 : #define MT_NRELS_HASH 4
4963 : #else
4964 : #define MT_NRELS_HASH 64
4965 : #endif
4966 120510 : if (nrels >= MT_NRELS_HASH)
4967 : {
4968 : HASHCTL hash_ctl;
4969 :
4970 0 : hash_ctl.keysize = sizeof(Oid);
4971 0 : hash_ctl.entrysize = sizeof(MTTargetRelLookup);
4972 0 : hash_ctl.hcxt = CurrentMemoryContext;
4973 0 : mtstate->mt_resultOidHash =
4974 0 : hash_create("ModifyTable target hash",
4975 : nrels, &hash_ctl,
4976 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
4977 0 : for (i = 0; i < nrels; i++)
4978 : {
4979 : Oid hashkey;
4980 : MTTargetRelLookup *mtlookup;
4981 : bool found;
4982 :
4983 0 : resultRelInfo = &mtstate->resultRelInfo[i];
4984 0 : hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
4985 : mtlookup = (MTTargetRelLookup *)
4986 0 : hash_search(mtstate->mt_resultOidHash, &hashkey,
4987 : HASH_ENTER, &found);
4988 : Assert(!found);
4989 0 : mtlookup->relationIndex = i;
4990 : }
4991 : }
4992 : else
4993 120510 : mtstate->mt_resultOidHash = NULL;
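/*
 * Sketch of the matching lookup side (simplified; variable names are
 * illustrative): given a target table OID from the "tableoid" junk
 * column, map it to an index in resultRelInfo[].
 *
 *		if (node->mt_resultOidHash)
 *		{
 *			MTTargetRelLookup *mtlookup;
 *			bool		found;
 *
 *			mtlookup = (MTTargetRelLookup *)
 *				hash_search(node->mt_resultOidHash, &resultoid,
 *							HASH_FIND, &found);
 *			if (found)
 *				resultRelInfo = node->resultRelInfo + mtlookup->relationIndex;
 *		}
 *		else
 *		{
 *			for (int ndx = 0; ndx < node->mt_nrels; ndx++)
 *				if (RelationGetRelid(node->resultRelInfo[ndx].ri_RelationDesc) == resultoid)
 *					resultRelInfo = node->resultRelInfo + ndx;
 *		}
 */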
4994 :
4995 : /*
4996 : * Determine if the FDW supports batch insert and determine the batch size
4997 : * (an FDW may support batching, but it may be disabled for the
4998 : * server/table).
4999 : *
5000 : * We only do this for INSERT, so that for UPDATE/DELETE the batch size
5001 : * remains set to 0.
5002 : */
5003 120510 : if (operation == CMD_INSERT)
5004 : {
5005 : /* INSERT can have only one result relation; inheritance is not expanded */
5006 : Assert(total_nrels == 1);
5007 92896 : resultRelInfo = mtstate->resultRelInfo;
5008 92896 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5009 92896 : resultRelInfo->ri_FdwRoutine != NULL &&
5010 174 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
5011 174 : resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
5012 : {
5013 174 : resultRelInfo->ri_BatchSize =
5014 174 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
5015 174 : Assert(resultRelInfo->ri_BatchSize >= 1);
5016 : }
5017 : else
5018 92722 : resultRelInfo->ri_BatchSize = 1;
5019 : }
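/*
 * Sketch of the FDW side of this handshake (a hypothetical FDW, not
 * part of core): GetForeignModifyBatchSize typically returns a
 * configured value, degraded to 1 when batching cannot be used, e.g.
 * with RETURNING or AFTER ROW triggers.
 *
 *		static int
 *		myGetForeignModifyBatchSize(ResultRelInfo *resultRelInfo)
 *		{
 *			int			batch_size = 100;	(e.g. from server/table options)
 *
 *			if (resultRelInfo->ri_projectReturning != NULL ||
 *				(resultRelInfo->ri_TrigDesc &&
 *				 resultRelInfo->ri_TrigDesc->trig_insert_after_row))
 *				return 1;
 *			return batch_size;
 *		}
 */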
5020 :
5021 : /*
5022 : * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
5023 : * to estate->es_auxmodifytables so that it will be run to completion by
5024 : * ExecPostprocessPlan. (It'd actually work fine to add the primary
5025 : * ModifyTable node too, but there's no need.) Note the use of lcons not
5026 : * lappend: we need later-initialized ModifyTable nodes to be shut down
5027 : * before earlier ones. This ensures that we don't throw away RETURNING
5028 : * rows that need to be seen by a later CTE subplan.
5029 : */
5030 120510 : if (!mtstate->canSetTag)
5031 942 : estate->es_auxmodifytables = lcons(mtstate,
5032 : estate->es_auxmodifytables);
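/*
 * Ordering illustration (not from this file): if node A is initialized
 * before node B,
 *
 *		list = lcons(A, NIL);		list is (A)
 *		list = lcons(B, list);		list is (B, A)
 *
 * so ExecPostprocessPlan runs B to completion first, before A.
 */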
5033 :
5034 120510 : return mtstate;
5035 : }
5036 :
5037 : /* ----------------------------------------------------------------
5038 : * ExecEndModifyTable
5039 : *
5040 : * Shuts down the plan.
5041 : *
5042 : * Returns nothing of interest.
5043 : * ----------------------------------------------------------------
5044 : */
5045 : void
5046 116182 : ExecEndModifyTable(ModifyTableState *node)
5047 : {
5048 : int i;
5049 :
5050 : /*
5051 : * Allow any FDWs to shut down
5052 : */
5053 234420 : for (i = 0; i < node->mt_nrels; i++)
5054 : {
5055 : int j;
5056 118238 : ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
5057 :
5058 118238 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5059 118046 : resultRelInfo->ri_FdwRoutine != NULL &&
5060 302 : resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
5061 302 : resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
5062 : resultRelInfo);
5063 :
5064 : /*
5065 : * Clean up the initialized batch slots. This only matters for FDWs
5066 : * with batching, but the other cases will have ri_NumSlotsInitialized
5067 : * == 0.
5068 : */
5069 118294 : for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
5070 : {
5071 56 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
5072 56 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
5073 : }
5074 : }
5075 :
5076 : /*
5077 : * Close all the partitioned tables, leaf partitions, and their indices
5078 : * and release the slot used for tuple routing, if set.
5079 : */
5080 116182 : if (node->mt_partition_tuple_routing)
5081 : {
5082 5944 : ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
5083 :
5084 5944 : if (node->mt_root_tuple_slot)
5085 644 : ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
5086 : }
5087 :
5088 : /*
5089 : * Terminate EPQ execution if active
5090 : */
5091 116182 : EvalPlanQualEnd(&node->mt_epqstate);
5092 :
5093 : /*
5094 : * shut down subplan
5095 : */
5096 116182 : ExecEndNode(outerPlanState(node));
5097 116182 : }
5098 :
5099 : void
5100 0 : ExecReScanModifyTable(ModifyTableState *node)
5101 : {
5102 : /*
5103 : * Currently, we don't need to support rescan on ModifyTable nodes. The
5104 : * semantics of that would be a bit debatable anyway.
5105 : */
5106 0 : elog(ERROR, "ExecReScanModifyTable is not implemented");
5107 : }
|