Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeModifyTable.c
4 : * routines to handle ModifyTable nodes.
5 : *
6 : * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/executor/nodeModifyTable.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : /* INTERFACE ROUTINES
16 : * ExecInitModifyTable - initialize the ModifyTable node
17 : * ExecModifyTable - retrieve the next tuple from the node
18 : * ExecEndModifyTable - shut down the ModifyTable node
19 : * ExecReScanModifyTable - rescan the ModifyTable node
20 : *
21 : * NOTES
22 : * The ModifyTable node receives input from its outerPlan, which is
23 : * the data to insert for INSERT cases, the changed columns' new
24 : * values plus row-locating info for UPDATE and MERGE cases, or just the
25 : * row-locating info for DELETE cases.
26 : *
27 : * MERGE runs a join between the source relation and the target
28 : * table; if any WHEN NOT MATCHED clauses are present, then the
29 : * join is an outer join. In this case, any unmatched tuples will
30 : * have NULL row-locating info, and only INSERT can be run. But for
31 : * matched tuples, the row-locating info is used to determine the
32 : * tuple to UPDATE or DELETE. When all clauses are WHEN MATCHED,
33 : * then an inner join is used, so all tuples contain row-locating info.
34 : *
35 : * If the query specifies RETURNING, then the ModifyTable returns a
36 : * RETURNING tuple after completing each row insert, update, or delete.
37 : * It must be called again to continue the operation. Without RETURNING,
38 : * we just loop within the node until all the work is done, then
39 : * return NULL. This avoids useless call/return overhead. (MERGE does
40 : * not support RETURNING.)
41 : */
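/*
 * As a rough illustration of the statement shapes this node serves (the
 * table and column names here are hypothetical, not taken from this file):
 *
 *		INSERT INTO orders (id, qty) VALUES (1, 10) RETURNING id;
 *		UPDATE orders SET qty = qty + 1 WHERE id = 1;
 *		DELETE FROM orders WHERE id = 1;
 *		MERGE INTO orders o
 *			USING incoming i ON o.id = i.id
 *			WHEN MATCHED THEN UPDATE SET qty = i.qty
 *			WHEN NOT MATCHED THEN INSERT (id, qty) VALUES (i.id, i.qty);
 *
 * The WHEN NOT MATCHED clause in the MERGE is what forces the outer join
 * described above.
 */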
42 :
43 : #include "postgres.h"
44 :
45 : #include "access/heapam.h"
46 : #include "access/htup_details.h"
47 : #include "access/tableam.h"
48 : #include "access/xact.h"
49 : #include "catalog/catalog.h"
50 : #include "commands/trigger.h"
51 : #include "executor/execPartition.h"
52 : #include "executor/executor.h"
53 : #include "executor/nodeModifyTable.h"
54 : #include "foreign/fdwapi.h"
55 : #include "miscadmin.h"
56 : #include "nodes/nodeFuncs.h"
57 : #include "optimizer/optimizer.h"
58 : #include "rewrite/rewriteHandler.h"
59 : #include "storage/bufmgr.h"
60 : #include "storage/lmgr.h"
61 : #include "utils/builtins.h"
62 : #include "utils/datum.h"
63 : #include "utils/memutils.h"
64 : #include "utils/rel.h"
65 :
66 :
67 : typedef struct MTTargetRelLookup
68 : {
69 : Oid relationOid; /* hash key, must be first */
70 : int relationIndex; /* rel's index in resultRelInfo[] array */
71 : } MTTargetRelLookup;
72 :
73 : /*
74 : * Context struct for a ModifyTable operation, containing basic execution
75 : * state and some output variables populated by ExecUpdateAct() and
76 : * ExecDeleteAct() to report the result of their actions to callers.
77 : */
78 : typedef struct ModifyTableContext
79 : {
80 : /* Operation state */
81 : ModifyTableState *mtstate;
82 : EPQState *epqstate;
83 : EState *estate;
84 :
85 : /*
86 : * Slot containing tuple obtained from ModifyTable's subplan. Used to
87 : * access "junk" columns that are not going to be stored.
88 : */
89 : TupleTableSlot *planSlot;
90 :
91 : /* MERGE specific */
92 : MergeActionState *relaction; /* MERGE action in progress */
93 :
94 : /*
95 : * Information about the changes that were made concurrently to a tuple
96 : * being updated or deleted
97 : */
98 : TM_FailureData tmfd;
99 :
100 : /*
101 : * The tuple projected by the INSERT's RETURNING clause, when doing a
102 : * cross-partition UPDATE
103 : */
104 : TupleTableSlot *cpUpdateReturningSlot;
105 : } ModifyTableContext;
106 :
107 : /*
108 : * Context struct containing output data specific to UPDATE operations.
109 : */
110 : typedef struct UpdateContext
111 : {
112 : bool updated; /* did UPDATE actually occur? */
113 : bool crossPartUpdate; /* was it a cross-partition update? */
114 : TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
115 :
116 : /*
117 : * Lock mode to acquire on the latest tuple version before performing
118 : * EvalPlanQual on it
119 : */
120 : LockTupleMode lockmode;
121 : } UpdateContext;
122 :
123 :
124 : static void ExecBatchInsert(ModifyTableState *mtstate,
125 : ResultRelInfo *resultRelInfo,
126 : TupleTableSlot **slots,
127 : TupleTableSlot **planSlots,
128 : int numSlots,
129 : EState *estate,
130 : bool canSetTag);
131 : static void ExecPendingInserts(EState *estate);
132 : static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
133 : ResultRelInfo *sourcePartInfo,
134 : ResultRelInfo *destPartInfo,
135 : ItemPointer tupleid,
136 : TupleTableSlot *oldslot,
137 : TupleTableSlot *newslot);
138 : static bool ExecOnConflictUpdate(ModifyTableContext *context,
139 : ResultRelInfo *resultRelInfo,
140 : ItemPointer conflictTid,
141 : TupleTableSlot *excludedSlot,
142 : bool canSetTag,
143 : TupleTableSlot **returning);
144 : static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
145 : EState *estate,
146 : PartitionTupleRouting *proute,
147 : ResultRelInfo *targetRelInfo,
148 : TupleTableSlot *slot,
149 : ResultRelInfo **partRelInfo);
150 :
151 : static TupleTableSlot *ExecMerge(ModifyTableContext *context,
152 : ResultRelInfo *resultRelInfo,
153 : ItemPointer tupleid,
154 : bool canSetTag);
155 : static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
156 : static bool ExecMergeMatched(ModifyTableContext *context,
157 : ResultRelInfo *resultRelInfo,
158 : ItemPointer tupleid,
159 : bool canSetTag);
160 : static void ExecMergeNotMatched(ModifyTableContext *context,
161 : ResultRelInfo *resultRelInfo,
162 : bool canSetTag);
163 :
164 :
165 : /*
166 : * Verify that the tuples to be produced by INSERT match the
167 : * target relation's rowtype
168 : *
169 : * We do this to guard against stale plans. If plan invalidation is
170 : * functioning properly then we should never get a failure here, but better
171 : * safe than sorry. Note that this is called after we have obtained lock
172 : * on the target rel, so the rowtype can't change underneath us.
173 : *
174 : * The plan output is represented by its targetlist, because that makes
175 : * handling the dropped-column case easier.
176 : *
177 : * We used to use this for UPDATE as well, but now the equivalent checks
178 : * are done in ExecBuildUpdateProjection.
179 : */
180 : static void
181 76616 : ExecCheckPlanOutput(Relation resultRel, List *targetList)
182 : {
183 76616 : TupleDesc resultDesc = RelationGetDescr(resultRel);
184 76616 : int attno = 0;
185 : ListCell *lc;
186 :
187 252518 : foreach(lc, targetList)
188 : {
189 175902 : TargetEntry *tle = (TargetEntry *) lfirst(lc);
190 : Form_pg_attribute attr;
191 :
192 : Assert(!tle->resjunk); /* caller removed junk items already */
193 :
194 175902 : if (attno >= resultDesc->natts)
195 0 : ereport(ERROR,
196 : (errcode(ERRCODE_DATATYPE_MISMATCH),
197 : errmsg("table row type and query-specified row type do not match"),
198 : errdetail("Query has too many columns.")));
199 175902 : attr = TupleDescAttr(resultDesc, attno);
200 175902 : attno++;
201 :
202 175902 : if (!attr->attisdropped)
203 : {
204 : /* Normal case: demand type match */
205 175292 : if (exprType((Node *) tle->expr) != attr->atttypid)
206 0 : ereport(ERROR,
207 : (errcode(ERRCODE_DATATYPE_MISMATCH),
208 : errmsg("table row type and query-specified row type do not match"),
209 : errdetail("Table has type %s at ordinal position %d, but query expects %s.",
210 : format_type_be(attr->atttypid),
211 : attno,
212 : format_type_be(exprType((Node *) tle->expr)))));
213 : }
214 : else
215 : {
216 : /*
217 : * For a dropped column, we can't check atttypid (it's likely 0).
218 : * In any case the planner has most likely inserted an INT4 null.
219 : * What we insist on is just *some* NULL constant.
220 : */
221 610 : if (!IsA(tle->expr, Const) ||
222 610 : !((Const *) tle->expr)->constisnull)
223 0 : ereport(ERROR,
224 : (errcode(ERRCODE_DATATYPE_MISMATCH),
225 : errmsg("table row type and query-specified row type do not match"),
226 : errdetail("Query provides a value for a dropped column at ordinal position %d.",
227 : attno)));
228 : }
229 : }
230 76616 : if (attno != resultDesc->natts)
231 0 : ereport(ERROR,
232 : (errcode(ERRCODE_DATATYPE_MISMATCH),
233 : errmsg("table row type and query-specified row type do not match"),
234 : errdetail("Query has too few columns.")));
235 76616 : }
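/*
 * A sketch of the dropped-column case handled above, using a hypothetical
 * table:
 *
 *		CREATE TABLE t (a int, b int, c int);
 *		ALTER TABLE t DROP COLUMN b;
 *		INSERT INTO t (a, c) VALUES (1, 2);
 *
 * The INSERT plan's targetlist still carries an entry at b's ordinal
 * position, but it is expected to be a NULL constant, which is all that
 * the check above insists on.
 */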
236 :
237 : /*
238 : * ExecProcessReturning --- evaluate a RETURNING list
239 : *
240 : * resultRelInfo: current result rel
241 : * tupleSlot: slot holding tuple actually inserted/updated/deleted
242 : * planSlot: slot holding tuple returned by top subplan node
243 : *
244 : * Note: If tupleSlot is NULL, the FDW should have already provided econtext's
245 : * scan tuple.
246 : *
247 : * Returns a slot holding the result tuple
248 : */
249 : static TupleTableSlot *
250 7030 : ExecProcessReturning(ResultRelInfo *resultRelInfo,
251 : TupleTableSlot *tupleSlot,
252 : TupleTableSlot *planSlot)
253 : {
254 7030 : ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
255 7030 : ExprContext *econtext = projectReturning->pi_exprContext;
256 :
257 : /* Make tuple and any needed join variables available to ExecProject */
258 7030 : if (tupleSlot)
259 6336 : econtext->ecxt_scantuple = tupleSlot;
260 7030 : econtext->ecxt_outertuple = planSlot;
261 :
262 : /*
263 : * RETURNING expressions might reference the tableoid column, so
264 : * reinitialize tts_tableOid before evaluating them.
265 : */
266 7030 : econtext->ecxt_scantuple->tts_tableOid =
267 7030 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
268 :
269 : /* Compute the RETURNING expressions */
270 7030 : return ExecProject(projectReturning);
271 : }
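/*
 * For instance, a RETURNING list evaluated here may refer both to columns
 * of the stored tuple and to the tableoid system column (hypothetical
 * table):
 *
 *		UPDATE t SET val = upper(val) WHERE id = 1
 *			RETURNING id, val, tableoid::regclass;
 */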
272 :
273 : /*
274 : * ExecCheckTupleVisible -- verify tuple is visible
275 : *
276 : * It would not be consistent with guarantees of the higher isolation levels to
277 : * proceed with avoiding insertion (taking speculative insertion's alternative
278 : * path) on the basis of another tuple that is not visible to the MVCC snapshot.
279 : * Check for the need to raise a serialization failure, and do so as necessary.
280 : */
281 : static void
282 5240 : ExecCheckTupleVisible(EState *estate,
283 : Relation rel,
284 : TupleTableSlot *slot)
285 : {
286 5240 : if (!IsolationUsesXactSnapshot())
287 5176 : return;
288 :
289 64 : if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
290 : {
291 : Datum xminDatum;
292 : TransactionId xmin;
293 : bool isnull;
294 :
295 40 : xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
296 : Assert(!isnull);
297 40 : xmin = DatumGetTransactionId(xminDatum);
298 :
299 : /*
300 : * We should not raise a serialization failure if the conflict is
301 : * against a tuple inserted by our own transaction, even if it's not
302 : * visible to our snapshot. (This would happen, for example, if
303 : * conflicting keys are proposed for insertion in a single command.)
304 : */
305 40 : if (!TransactionIdIsCurrentTransactionId(xmin))
306 20 : ereport(ERROR,
307 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
308 : errmsg("could not serialize access due to concurrent update")));
309 : }
310 : }
311 :
312 : /*
313 : * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
314 : */
315 : static void
316 158 : ExecCheckTIDVisible(EState *estate,
317 : ResultRelInfo *relinfo,
318 : ItemPointer tid,
319 : TupleTableSlot *tempSlot)
320 : {
321 158 : Relation rel = relinfo->ri_RelationDesc;
322 :
323 : /* Redundantly check isolation level */
324 158 : if (!IsolationUsesXactSnapshot())
325 94 : return;
326 :
327 64 : if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
328 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
329 64 : ExecCheckTupleVisible(estate, rel, tempSlot);
330 44 : ExecClearTuple(tempSlot);
331 : }
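/*
 * The visibility recheck above matters for statements such as this one
 * (hypothetical table with a unique index on id), run under REPEATABLE
 * READ or SERIALIZABLE:
 *
 *		INSERT INTO t (id) VALUES (1) ON CONFLICT (id) DO NOTHING;
 *
 * If the conflicting row was inserted by a transaction that committed
 * after our snapshot was taken, silently doing nothing would violate the
 * isolation level's guarantees, so a serialization failure is raised
 * instead.
 */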
332 :
333 : /*
334 : * Initialize to compute stored generated columns for a tuple
335 : *
336 : * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI
337 : * or ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
338 : * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
339 : *
340 : * Note: usually, a given query would need only one of ri_GeneratedExprsI and
341 : * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
342 : * cross-partition UPDATEs, since a partition might be the target of both
343 : * UPDATE and INSERT actions.
344 : */
345 : void
346 57836 : ExecInitStoredGenerated(ResultRelInfo *resultRelInfo,
347 : EState *estate,
348 : CmdType cmdtype)
349 : {
350 57836 : Relation rel = resultRelInfo->ri_RelationDesc;
351 57836 : TupleDesc tupdesc = RelationGetDescr(rel);
352 57836 : int natts = tupdesc->natts;
353 : ExprState **ri_GeneratedExprs;
354 : int ri_NumGeneratedNeeded;
355 : Bitmapset *updatedCols;
356 : MemoryContext oldContext;
357 :
358 : /* Nothing to do if no generated columns */
359 57836 : if (!(tupdesc->constr && tupdesc->constr->has_generated_stored))
360 56926 : return;
361 :
362 : /*
363 : * In an UPDATE, we can skip computing any generated columns that do not
364 : * depend on any UPDATE target column. But if there is a BEFORE ROW
365 : * UPDATE trigger, we cannot skip because the trigger might change more
366 : * columns.
367 : */
368 910 : if (cmdtype == CMD_UPDATE &&
369 228 : !(rel->trigdesc && rel->trigdesc->trig_update_before_row))
370 202 : updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
371 : else
372 708 : updatedCols = NULL;
373 :
374 : /*
375 : * Make sure these data structures are built in the per-query memory
376 : * context so they'll survive throughout the query.
377 : */
378 910 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
379 :
380 910 : ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
381 910 : ri_NumGeneratedNeeded = 0;
382 :
383 3502 : for (int i = 0; i < natts; i++)
384 : {
385 2592 : if (TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED)
386 : {
387 : Expr *expr;
388 :
389 : /* Fetch the GENERATED AS expression tree */
390 930 : expr = (Expr *) build_column_default(rel, i + 1);
391 930 : if (expr == NULL)
392 0 : elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
393 : i + 1, RelationGetRelationName(rel));
394 :
395 : /*
396 : * If it's an update with a known set of update target columns,
397 : * see if we can skip the computation.
398 : */
399 930 : if (updatedCols)
400 : {
401 208 : Bitmapset *attrs_used = NULL;
402 :
403 208 : pull_varattnos((Node *) expr, 1, &attrs_used);
404 :
405 208 : if (!bms_overlap(updatedCols, attrs_used))
406 24 : continue; /* need not update this column */
407 : }
408 :
409 : /* No luck, so prepare the expression for execution */
410 906 : ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
411 906 : ri_NumGeneratedNeeded++;
412 :
413 : /* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
414 906 : if (cmdtype == CMD_UPDATE)
415 210 : resultRelInfo->ri_extraUpdatedCols =
416 210 : bms_add_member(resultRelInfo->ri_extraUpdatedCols,
417 : i + 1 - FirstLowInvalidHeapAttributeNumber);
418 : }
419 : }
420 :
421 : /* Save in appropriate set of fields */
422 910 : if (cmdtype == CMD_UPDATE)
423 : {
424 : /* Don't call twice */
425 : Assert(resultRelInfo->ri_GeneratedExprsU == NULL);
426 :
427 228 : resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
428 228 : resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;
429 : }
430 : else
431 : {
432 : /* Don't call twice */
433 : Assert(resultRelInfo->ri_GeneratedExprsI == NULL);
434 :
435 682 : resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
436 682 : resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
437 : }
438 :
439 910 : MemoryContextSwitchTo(oldContext);
440 : }
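/*
 * The skip logic above applies to cases like this hypothetical table:
 *
 *		CREATE TABLE t (a int, b int,
 *						c int GENERATED ALWAYS AS (a + 1) STORED);
 *		UPDATE t SET b = 0;
 *
 * Since c depends only on a, which this UPDATE does not change (and there
 * is no BEFORE ROW UPDATE trigger), no generated expression needs to be
 * prepared or evaluated for this statement.
 */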
441 :
442 : /*
443 : * Compute stored generated columns for a tuple
444 : */
445 : void
446 1202 : ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
447 : EState *estate, TupleTableSlot *slot,
448 : CmdType cmdtype)
449 : {
450 1202 : Relation rel = resultRelInfo->ri_RelationDesc;
451 1202 : TupleDesc tupdesc = RelationGetDescr(rel);
452 1202 : int natts = tupdesc->natts;
453 1202 : ExprContext *econtext = GetPerTupleExprContext(estate);
454 : ExprState **ri_GeneratedExprs;
455 : MemoryContext oldContext;
456 : Datum *values;
457 : bool *nulls;
458 :
459 : /* We should not be called unless this is true */
460 : Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);
461 :
462 : /*
463 : * Initialize the expressions if we didn't already, and check whether we
464 : * can exit early because nothing needs to be computed.
465 : */
466 1202 : if (cmdtype == CMD_UPDATE)
467 : {
468 264 : if (resultRelInfo->ri_GeneratedExprsU == NULL)
469 202 : ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
470 264 : if (resultRelInfo->ri_NumGeneratedNeededU == 0)
471 18 : return;
472 246 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
473 : }
474 : else
475 : {
476 938 : if (resultRelInfo->ri_GeneratedExprsI == NULL)
477 682 : ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
478 : /* Early exit is impossible given the prior Assert */
479 : Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
480 938 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
481 : }
482 :
483 1184 : oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
484 :
485 1184 : values = palloc(sizeof(*values) * natts);
486 1184 : nulls = palloc(sizeof(*nulls) * natts);
487 :
488 1184 : slot_getallattrs(slot);
489 1184 : memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);
490 :
491 4442 : for (int i = 0; i < natts; i++)
492 : {
493 3270 : Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
494 :
495 3270 : if (ri_GeneratedExprs[i])
496 : {
497 : Datum val;
498 : bool isnull;
499 :
500 : Assert(attr->attgenerated == ATTRIBUTE_GENERATED_STORED);
501 :
502 1198 : econtext->ecxt_scantuple = slot;
503 :
504 1198 : val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);
505 :
506 : /*
507 : * We must make a copy of val as we have no guarantees about where
508 : * memory for a pass-by-reference Datum is located.
509 : */
510 1186 : if (!isnull)
511 1144 : val = datumCopy(val, attr->attbyval, attr->attlen);
512 :
513 1186 : values[i] = val;
514 1186 : nulls[i] = isnull;
515 : }
516 : else
517 : {
518 2072 : if (!nulls[i])
519 2030 : values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
520 : }
521 : }
522 :
523 1172 : ExecClearTuple(slot);
524 1172 : memcpy(slot->tts_values, values, sizeof(*values) * natts);
525 1172 : memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
526 1172 : ExecStoreVirtualTuple(slot);
527 1172 : ExecMaterializeSlot(slot);
528 :
529 1172 : MemoryContextSwitchTo(oldContext);
530 : }
531 :
532 : /*
533 : * ExecInitInsertProjection
534 : * Do one-time initialization of projection data for INSERT tuples.
535 : *
536 : * INSERT queries may need a projection to filter out junk attrs in the tlist.
537 : *
538 : * This is also a convenient place to verify that the
539 : * output of an INSERT matches the target table.
540 : */
541 : static void
542 75962 : ExecInitInsertProjection(ModifyTableState *mtstate,
543 : ResultRelInfo *resultRelInfo)
544 : {
545 75962 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
546 75962 : Plan *subplan = outerPlan(node);
547 75962 : EState *estate = mtstate->ps.state;
548 75962 : List *insertTargetList = NIL;
549 75962 : bool need_projection = false;
550 : ListCell *l;
551 :
552 : /* Extract non-junk columns of the subplan's result tlist. */
553 250170 : foreach(l, subplan->targetlist)
554 : {
555 174208 : TargetEntry *tle = (TargetEntry *) lfirst(l);
556 :
557 174208 : if (!tle->resjunk)
558 174208 : insertTargetList = lappend(insertTargetList, tle);
559 : else
560 0 : need_projection = true;
561 : }
562 :
563 : /*
564 : * The junk-free list must produce a tuple suitable for the result
565 : * relation.
566 : */
567 75962 : ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);
568 :
569 : /* We'll need a slot matching the table's format. */
570 75962 : resultRelInfo->ri_newTupleSlot =
571 75962 : table_slot_create(resultRelInfo->ri_RelationDesc,
572 : &estate->es_tupleTable);
573 :
574 : /* Build ProjectionInfo if needed (it probably isn't). */
575 75962 : if (need_projection)
576 : {
577 0 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
578 :
579 : /* need an expression context to do the projection */
580 0 : if (mtstate->ps.ps_ExprContext == NULL)
581 0 : ExecAssignExprContext(estate, &mtstate->ps);
582 :
583 0 : resultRelInfo->ri_projectNew =
584 0 : ExecBuildProjectionInfo(insertTargetList,
585 : mtstate->ps.ps_ExprContext,
586 : resultRelInfo->ri_newTupleSlot,
587 : &mtstate->ps,
588 : relDesc);
589 : }
590 :
591 75962 : resultRelInfo->ri_projectNewInfoValid = true;
592 75962 : }
593 :
594 : /*
595 : * ExecInitUpdateProjection
596 : * Do one-time initialization of projection data for UPDATE tuples.
597 : *
598 : * UPDATE always needs a projection, because (1) there are always some junk
599 : * attrs, and (2) we may need to merge values of not-updated columns from
600 : * the old tuple into the final tuple. In UPDATE, the tuple arriving from
601 : * the subplan contains only new values for the changed columns, plus row
602 : * identity info in the junk attrs.
603 : *
604 : * This is "one-time" for any given result rel, but we might touch more than
605 : * one result rel in the course of an inherited UPDATE, and each one needs
606 : * its own projection due to possible column order variation.
607 : *
608 : * This is also a convenient place to verify that the output of an UPDATE
609 : * matches the target table (ExecBuildUpdateProjection does that).
610 : */
611 : static void
612 12142 : ExecInitUpdateProjection(ModifyTableState *mtstate,
613 : ResultRelInfo *resultRelInfo)
614 : {
615 12142 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
616 12142 : Plan *subplan = outerPlan(node);
617 12142 : EState *estate = mtstate->ps.state;
618 12142 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
619 : int whichrel;
620 : List *updateColnos;
621 :
622 : /*
623 : * Usually, mt_lastResultIndex matches the target rel. If it happens not
624 : * to, we can get the index the hard way with an integer division.
625 : */
626 12142 : whichrel = mtstate->mt_lastResultIndex;
627 12142 : if (resultRelInfo != mtstate->resultRelInfo + whichrel)
628 : {
629 0 : whichrel = resultRelInfo - mtstate->resultRelInfo;
630 : Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
631 : }
632 :
633 12142 : updateColnos = (List *) list_nth(node->updateColnosLists, whichrel);
634 :
635 : /*
636 : * For UPDATE, we use the old tuple to fill up missing values in the tuple
637 : * produced by the subplan to get the new tuple. We need two slots, both
638 : * matching the table's desired format.
639 : */
640 12142 : resultRelInfo->ri_oldTupleSlot =
641 12142 : table_slot_create(resultRelInfo->ri_RelationDesc,
642 : &estate->es_tupleTable);
643 12142 : resultRelInfo->ri_newTupleSlot =
644 12142 : table_slot_create(resultRelInfo->ri_RelationDesc,
645 : &estate->es_tupleTable);
646 :
647 : /* need an expression context to do the projection */
648 12142 : if (mtstate->ps.ps_ExprContext == NULL)
649 10960 : ExecAssignExprContext(estate, &mtstate->ps);
650 :
651 12142 : resultRelInfo->ri_projectNew =
652 12142 : ExecBuildUpdateProjection(subplan->targetlist,
653 : false, /* subplan did the evaluation */
654 : updateColnos,
655 : relDesc,
656 : mtstate->ps.ps_ExprContext,
657 : resultRelInfo->ri_newTupleSlot,
658 : &mtstate->ps);
659 :
660 12142 : resultRelInfo->ri_projectNewInfoValid = true;
661 12142 : }
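/*
 * To make the projection's job concrete, consider a hypothetical
 *
 *		UPDATE t SET b = b + 1 WHERE id = 7;
 *
 * The subplan emits only the new value of b plus junk row-identity
 * columns; the projection built here merges that with the old tuple's
 * values for the untouched columns to produce the complete new tuple.
 */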
662 :
663 : /*
664 : * ExecGetInsertNewTuple
665 : * This prepares a "new" tuple ready to be inserted into given result
666 : * relation, by removing any junk columns of the plan's output tuple
667 : * and (if necessary) coercing the tuple to the right tuple format.
668 : */
669 : static TupleTableSlot *
670 11142822 : ExecGetInsertNewTuple(ResultRelInfo *relinfo,
671 : TupleTableSlot *planSlot)
672 : {
673 11142822 : ProjectionInfo *newProj = relinfo->ri_projectNew;
674 : ExprContext *econtext;
675 :
676 : /*
677 : * If there's no projection to be done, just make sure the slot is of the
678 : * right type for the target rel. If the planSlot is the right type we
679 : * can use it as-is, else copy the data into ri_newTupleSlot.
680 : */
681 11142822 : if (newProj == NULL)
682 : {
683 11142822 : if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
684 : {
685 10372862 : ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
686 10372862 : return relinfo->ri_newTupleSlot;
687 : }
688 : else
689 769960 : return planSlot;
690 : }
691 :
692 : /*
693 : * Else project; since the projection output slot is ri_newTupleSlot, this
694 : * will also fix any slot-type problem.
695 : *
696 : * Note: currently, this is dead code, because INSERT cases don't receive
697 : * any junk columns so there's never a projection to be done.
698 : */
699 0 : econtext = newProj->pi_exprContext;
700 0 : econtext->ecxt_outertuple = planSlot;
701 0 : return ExecProject(newProj);
702 : }
703 :
704 : /*
705 : * ExecGetUpdateNewTuple
706 : * This prepares a "new" tuple by combining an UPDATE subplan's output
707 : * tuple (which contains values of changed columns) with unchanged
708 : * columns taken from the old tuple.
709 : *
710 : * The subplan tuple might also contain junk columns, which are ignored.
711 : * Note that the projection also ensures we have a slot of the right type.
712 : */
713 : TupleTableSlot *
714 303064 : ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
715 : TupleTableSlot *planSlot,
716 : TupleTableSlot *oldSlot)
717 : {
718 303064 : ProjectionInfo *newProj = relinfo->ri_projectNew;
719 : ExprContext *econtext;
720 :
721 : /* Use a few extra Asserts to protect against outside callers */
722 : Assert(relinfo->ri_projectNewInfoValid);
723 : Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
724 : Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));
725 :
726 303064 : econtext = newProj->pi_exprContext;
727 303064 : econtext->ecxt_outertuple = planSlot;
728 303064 : econtext->ecxt_scantuple = oldSlot;
729 303064 : return ExecProject(newProj);
730 : }
731 :
732 : /* ----------------------------------------------------------------
733 : * ExecInsert
734 : *
735 : * For INSERT, we have to insert the tuple into the target relation
736 : * (or partition thereof) and insert appropriate tuples into the index
737 : * relations.
738 : *
739 : * slot contains the new tuple value to be stored.
740 : *
741 : * Returns RETURNING result if any, otherwise NULL.
742 : * *inserted_tuple is the tuple that's effectively inserted;
743 : * *insert_destrel is the relation where it was inserted.
744 : * These are only set on success.
745 : *
746 : * This may change the currently active tuple conversion map in
747 : * mtstate->mt_transition_capture, so the callers must take care to
748 : * save the previous value to avoid losing track of it.
749 : * ----------------------------------------------------------------
750 : */
751 : static TupleTableSlot *
752 11145156 : ExecInsert(ModifyTableContext *context,
753 : ResultRelInfo *resultRelInfo,
754 : TupleTableSlot *slot,
755 : bool canSetTag,
756 : TupleTableSlot **inserted_tuple,
757 : ResultRelInfo **insert_destrel)
758 : {
759 11145156 : ModifyTableState *mtstate = context->mtstate;
760 11145156 : EState *estate = context->estate;
761 : Relation resultRelationDesc;
762 11145156 : List *recheckIndexes = NIL;
763 11145156 : TupleTableSlot *planSlot = context->planSlot;
764 11145156 : TupleTableSlot *result = NULL;
765 : TransitionCaptureState *ar_insert_trig_tcs;
766 11145156 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
767 11145156 : OnConflictAction onconflict = node->onConflictAction;
768 11145156 : PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
769 : MemoryContext oldContext;
770 :
771 : /*
772 : * If the input result relation is a partitioned table, find the leaf
773 : * partition to insert the tuple into.
774 : */
775 11145156 : if (proute)
776 : {
777 : ResultRelInfo *partRelInfo;
778 :
779 721090 : slot = ExecPrepareTupleRouting(mtstate, estate, proute,
780 : resultRelInfo, slot,
781 : &partRelInfo);
782 720892 : resultRelInfo = partRelInfo;
783 : }
784 :
785 11144958 : ExecMaterializeSlot(slot);
786 :
787 11144958 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
788 :
789 : /*
790 : * Open the table's indexes, if we have not done so already, so that we
791 : * can add new index entries for the inserted tuple.
792 : */
793 11144958 : if (resultRelationDesc->rd_rel->relhasindex &&
794 2790646 : resultRelInfo->ri_IndexRelationDescs == NULL)
795 28660 : ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);
796 :
797 : /*
798 : * BEFORE ROW INSERT Triggers.
799 : *
800 : * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
801 : * INSERT ... ON CONFLICT statement. We cannot check for constraint
802 : * violations before firing these triggers, because they can change the
803 : * values to insert. Also, they can run arbitrary user-defined code with
804 : * side-effects that we can't cancel by just not inserting the tuple.
805 : */
806 11144958 : if (resultRelInfo->ri_TrigDesc &&
807 74396 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)
808 : {
809 : /* Flush any pending inserts, so rows are visible to the triggers */
810 2048 : if (estate->es_insert_pending_result_relations != NIL)
811 6 : ExecPendingInserts(estate);
812 :
813 2048 : if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
814 134 : return NULL; /* "do nothing" */
815 : }
816 :
817 : /* INSTEAD OF ROW INSERT Triggers */
818 11144708 : if (resultRelInfo->ri_TrigDesc &&
819 74146 : resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
820 : {
821 138 : if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
822 6 : return NULL; /* "do nothing" */
823 : }
824 11144570 : else if (resultRelInfo->ri_FdwRoutine)
825 : {
826 : /*
827 : * GENERATED expressions might reference the tableoid column, so
828 : * (re-)initialize tts_tableOid before evaluating them.
829 : */
830 2014 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
831 :
832 : /*
833 : * Compute stored generated columns
834 : */
835 2014 : if (resultRelationDesc->rd_att->constr &&
836 366 : resultRelationDesc->rd_att->constr->has_generated_stored)
837 8 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
838 : CMD_INSERT);
839 :
840 : /*
841 : * If the FDW supports batching, and batching is requested, accumulate
842 : * rows and insert them in batches. Otherwise use the per-row inserts.
843 : */
844 2014 : if (resultRelInfo->ri_BatchSize > 1)
845 : {
846 288 : bool flushed = false;
847 :
848 : /*
849 : * When we've reached the desired batch size, perform the
850 : * insertion.
851 : */
852 288 : if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
853 : {
854 20 : ExecBatchInsert(mtstate, resultRelInfo,
855 : resultRelInfo->ri_Slots,
856 : resultRelInfo->ri_PlanSlots,
857 : resultRelInfo->ri_NumSlots,
858 : estate, canSetTag);
859 20 : flushed = true;
860 : }
861 :
862 288 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
863 :
864 288 : if (resultRelInfo->ri_Slots == NULL)
865 : {
866 56 : resultRelInfo->ri_Slots = palloc(sizeof(TupleTableSlot *) *
867 28 : resultRelInfo->ri_BatchSize);
868 28 : resultRelInfo->ri_PlanSlots = palloc(sizeof(TupleTableSlot *) *
869 28 : resultRelInfo->ri_BatchSize);
870 : }
871 :
872 : /*
873 : * Initialize the batch slots. We don't know how many slots will
874 : * be needed, so we initialize them as the batch grows, and we
875 : * keep them across batches. To mitigate an inefficiency in how
876 : * resource owner handles objects with many references (as with
877 : * many slots all referencing the same tuple descriptor) we copy
878 : * the appropriate tuple descriptor for each slot.
879 : */
880 288 : if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
881 : {
882 142 : TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
883 : TupleDesc plan_tdesc =
884 142 : CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
885 :
886 284 : resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
887 142 : MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
888 :
889 284 : resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
890 142 : MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
891 :
892 : /* remember how many batch slots we initialized */
893 142 : resultRelInfo->ri_NumSlotsInitialized++;
894 : }
895 :
896 288 : ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
897 : slot);
898 :
899 288 : ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
900 : planSlot);
901 :
902 : /*
903 : * If these are the first tuples stored in the buffers, add the
904 : * target rel and the mtstate to the
905 : * es_insert_pending_result_relations and
906 : * es_insert_pending_modifytables lists respectively, except in
907 : * the case where flushing was done above, in which case they
908 : * would already have been added to the lists, so no need to do
909 : * this.
910 : */
911 288 : if (resultRelInfo->ri_NumSlots == 0 && !flushed)
912 : {
913 : Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
914 : resultRelInfo));
915 36 : estate->es_insert_pending_result_relations =
916 36 : lappend(estate->es_insert_pending_result_relations,
917 : resultRelInfo);
918 36 : estate->es_insert_pending_modifytables =
919 36 : lappend(estate->es_insert_pending_modifytables, mtstate);
920 : }
921 : Assert(list_member_ptr(estate->es_insert_pending_result_relations,
922 : resultRelInfo));
923 :
924 288 : resultRelInfo->ri_NumSlots++;
925 :
926 288 : MemoryContextSwitchTo(oldContext);
927 :
928 288 : return NULL;
929 : }
930 :
931 : /*
932 : * insert into foreign table: let the FDW do it
933 : */
934 1726 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
935 : resultRelInfo,
936 : slot,
937 : planSlot);
938 :
939 1720 : if (slot == NULL) /* "do nothing" */
940 4 : return NULL;
941 :
942 : /*
943 : * AFTER ROW Triggers or RETURNING expressions might reference the
944 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
945 : * them. (This covers the case where the FDW replaced the slot.)
946 : */
947 1716 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
948 : }
949 : else
950 : {
951 : WCOKind wco_kind;
952 :
953 : /*
954 : * Constraints and GENERATED expressions might reference the tableoid
955 : * column, so (re-)initialize tts_tableOid before evaluating them.
956 : */
957 11142556 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
958 :
959 : /*
960 : * Compute stored generated columns
961 : */
962 11142556 : if (resultRelationDesc->rd_att->constr &&
963 3014710 : resultRelationDesc->rd_att->constr->has_generated_stored)
964 886 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
965 : CMD_INSERT);
966 :
967 : /*
968 : * Check any RLS WITH CHECK policies.
969 : *
970 : * Normally we should check INSERT policies. But if the insert is the
971 : * result of a partition key update that moved the tuple to a new
972 : * partition, we should instead check UPDATE policies, because we are
973 : * executing policies defined on the target table, and not those
974 : * defined on the child partitions.
975 : *
976 : * If we're running MERGE, we refer to the action that we're executing
977 : * to know if we're doing an INSERT or UPDATE to a partition table.
978 : */
979 11142544 : if (mtstate->operation == CMD_UPDATE)
980 686 : wco_kind = WCO_RLS_UPDATE_CHECK;
981 11141858 : else if (mtstate->operation == CMD_MERGE)
982 1510 : wco_kind = (context->relaction->mas_action->commandType == CMD_UPDATE) ?
983 1510 : WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
984 : else
985 11140348 : wco_kind = WCO_RLS_INSERT_CHECK;
986 :
987 : /*
988 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
989 : * we are looking for at this point.
990 : */
991 11142544 : if (resultRelInfo->ri_WithCheckOptions != NIL)
992 528 : ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
993 :
994 : /*
995 : * Check the constraints of the tuple.
996 : */
997 11142376 : if (resultRelationDesc->rd_att->constr)
998 3014620 : ExecConstraints(resultRelInfo, slot, estate);
999 :
1000 : /*
1001 : * Also check the tuple against the partition constraint, if there is
1002 : * one; except that if we got here via tuple-routing, we don't need to
1003 : * if there's no BR trigger defined on the partition.
1004 : */
1005 11141820 : if (resultRelationDesc->rd_rel->relispartition &&
1006 725138 : (resultRelInfo->ri_RootResultRelInfo == NULL ||
1007 720386 : (resultRelInfo->ri_TrigDesc &&
1008 1286 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
1009 4948 : ExecPartitionCheck(resultRelInfo, slot, estate, true);
1010 :
1011 11141652 : if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
1012 4010 : {
1013 : /* Perform a speculative insertion. */
1014 : uint32 specToken;
1015 : ItemPointerData conflictTid;
1016 : bool specConflict;
1017 : List *arbiterIndexes;
1018 :
1019 9380 : arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
1020 :
1021 : /*
1022 : * Do a non-conclusive check for conflicts first.
1023 : *
1024 : * We're not holding any locks yet, so this doesn't guarantee that
1025 : * the later insert won't conflict. But it avoids leaving behind
1026 : * a lot of canceled speculative insertions, if you run a lot of
1027 : * INSERT ON CONFLICT statements that do conflict.
1028 : *
1029 : * We loop back here if we find a conflict below, either during
1030 : * the pre-check, or when we re-check after inserting the tuple
1031 : * speculatively. Better allow interrupts in case some bug makes
1032 : * this an infinite loop.
1033 : */
1034 9390 : vlock:
1035 9390 : CHECK_FOR_INTERRUPTS();
1036 9390 : specConflict = false;
1037 9390 : if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
1038 : &conflictTid, arbiterIndexes))
1039 : {
1040 : /* committed conflict tuple found */
1041 5358 : if (onconflict == ONCONFLICT_UPDATE)
1042 : {
1043 : /*
1044 : * In case of ON CONFLICT DO UPDATE, execute the UPDATE
1045 : * part. Be prepared to retry if the UPDATE fails because
1046 : * of another concurrent UPDATE/DELETE to the conflict
1047 : * tuple.
1048 : */
1049 5200 : TupleTableSlot *returning = NULL;
1050 :
1051 5200 : if (ExecOnConflictUpdate(context, resultRelInfo,
1052 : &conflictTid, slot, canSetTag,
1053 : &returning))
1054 : {
1055 5122 : InstrCountTuples2(&mtstate->ps, 1);
1056 5122 : return returning;
1057 : }
1058 : else
1059 0 : goto vlock;
1060 : }
1061 : else
1062 : {
1063 : /*
1064 : * In case of ON CONFLICT DO NOTHING, do nothing. However,
1065 : * verify that the tuple is visible to the executor's MVCC
1066 : * snapshot at higher isolation levels.
1067 : *
1068 : * Using ExecGetReturningSlot() to store the tuple for the
1069 : * recheck isn't that pretty, but we can't trivially use
1070 : * the input slot, because it might not be of a compatible
1071 : * type. As there's no conflicting usage of
1072 : * ExecGetReturningSlot() in the DO NOTHING case...
1073 : */
1074 : Assert(onconflict == ONCONFLICT_NOTHING);
1075 158 : ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
1076 : ExecGetReturningSlot(estate, resultRelInfo));
1077 138 : InstrCountTuples2(&mtstate->ps, 1);
1078 138 : return NULL;
1079 : }
1080 : }
1081 :
1082 : /*
1083 : * Before we start insertion proper, acquire our "speculative
1084 : * insertion lock". Others can use that to wait for us to decide
1085 : * if we're going to go ahead with the insertion, instead of
1086 : * waiting for the whole transaction to complete.
1087 : */
1088 4026 : specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
1089 :
1090 : /* insert the tuple, with the speculative token */
1091 4026 : table_tuple_insert_speculative(resultRelationDesc, slot,
1092 : estate->es_output_cid,
1093 : 0,
1094 : NULL,
1095 : specToken);
1096 :
1097 : /* insert index entries for tuple */
1098 4026 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1099 : slot, estate, false, true,
1100 : &specConflict,
1101 : arbiterIndexes,
1102 : false);
1103 :
1104 : /* adjust the tuple's state accordingly */
1105 4020 : table_tuple_complete_speculative(resultRelationDesc, slot,
1106 4020 : specToken, !specConflict);
1107 :
1108 : /*
1109 : * Wake up anyone waiting for our decision. They will re-check
1110 : * the tuple, see that it's no longer speculative, and wait on our
1111 : * XID as if this was a regularly inserted tuple all along. Or if
1112 : * we killed the tuple, they will see it's dead, and proceed as if
1113 : * the tuple never existed.
1114 : */
1115 4020 : SpeculativeInsertionLockRelease(GetCurrentTransactionId());
1116 :
1117 : /*
1118 : * If there was a conflict, start from the beginning. We'll do
1119 : * the pre-check again, which will now find the conflicting tuple
1120 : * (unless it aborts before we get there).
1121 : */
1122 4020 : if (specConflict)
1123 : {
1124 10 : list_free(recheckIndexes);
1125 10 : goto vlock;
1126 : }
1127 :
1128 : /* Since there was no insertion conflict, we're done */
1129 : }
1130 : else
1131 : {
1132 : /* insert the tuple normally */
1133 11132272 : table_tuple_insert(resultRelationDesc, slot,
1134 : estate->es_output_cid,
1135 : 0, NULL);
1136 :
1137 : /* insert index entries for tuple */
1138 11132248 : if (resultRelInfo->ri_NumIndices > 0)
1139 2780674 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1140 : slot, estate, false,
1141 : false, NULL, NIL,
1142 : false);
1143 : }
1144 : }
1145 :
1146 11137590 : if (canSetTag)
1147 11136374 : (estate->es_processed)++;
1148 :
1149 : /*
1150 : * If this insert is the result of a partition key update that moved the
1151 : * tuple to a new partition, put this row into the transition NEW TABLE,
1152 : * if there is one. We need to do this separately for DELETE and INSERT
1153 : * because they happen on different tables.
1154 : */
1155 11137590 : ar_insert_trig_tcs = mtstate->mt_transition_capture;
1156 11137590 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
1157 42 : && mtstate->mt_transition_capture->tcs_update_new_table)
1158 : {
1159 42 : ExecARUpdateTriggers(estate, resultRelInfo,
1160 : NULL, NULL,
1161 : NULL,
1162 : NULL,
1163 : slot,
1164 : NULL,
1165 42 : mtstate->mt_transition_capture,
1166 : false);
1167 :
1168 : /*
1169 : * We've already captured the NEW TABLE row, so make sure any AR
1170 : * INSERT trigger fired below doesn't capture it again.
1171 : */
1172 42 : ar_insert_trig_tcs = NULL;
1173 : }
1174 :
1175 : /* AFTER ROW INSERT Triggers */
1176 11137590 : ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
1177 : ar_insert_trig_tcs);
1178 :
1179 11137590 : list_free(recheckIndexes);
1180 :
1181 : /*
1182 : * Check any WITH CHECK OPTION constraints from parent views. We are
1183 : * required to do this after testing all constraints and uniqueness
1184 : * violations per the SQL spec, so we do it after actually inserting the
1185 : * record into the heap and all indexes.
1186 : *
1187 : * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
1188 : * tuple will never be seen, if it violates the WITH CHECK OPTION.
1189 : *
1190 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1191 : * are looking for at this point.
1192 : */
1193 11137590 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1194 328 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1195 :
1196 : /* Process RETURNING if present */
1197 11137456 : if (resultRelInfo->ri_projectReturning)
1198 3324 : result = ExecProcessReturning(resultRelInfo, slot, planSlot);
1199 :
1200 11137444 : if (inserted_tuple)
1201 688 : *inserted_tuple = slot;
1202 11137444 : if (insert_destrel)
1203 688 : *insert_destrel = resultRelInfo;
1204 :
1205 11137444 : return result;
1206 : }
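/*
 * The speculative-insertion path above serves statements such as the
 * following (hypothetical table with a primary key on id):
 *
 *		INSERT INTO t (id, val) VALUES (1, 'x')
 *			ON CONFLICT (id) DO UPDATE SET val = EXCLUDED.val;
 *
 * If the arbiter-index pre-check finds a committed conflicting row, the
 * DO UPDATE (or DO NOTHING) part runs instead; otherwise the tuple is
 * inserted speculatively and finalized only if no concurrent conflict
 * shows up while its index entries are being made.
 */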
1207 :
1208 : /* ----------------------------------------------------------------
1209 : * ExecBatchInsert
1210 : *
1211 : * Insert multiple tuples in an efficient way.
1212 : * Currently, this handles inserting into a foreign table without
1213 : * RETURNING clause.
1214 : * ----------------------------------------------------------------
1215 : */
1216 : static void
1217 56 : ExecBatchInsert(ModifyTableState *mtstate,
1218 : ResultRelInfo *resultRelInfo,
1219 : TupleTableSlot **slots,
1220 : TupleTableSlot **planSlots,
1221 : int numSlots,
1222 : EState *estate,
1223 : bool canSetTag)
1224 : {
1225 : int i;
1226 56 : int numInserted = numSlots;
1227 56 : TupleTableSlot *slot = NULL;
1228 : TupleTableSlot **rslots;
1229 :
1230 : /*
1231 : * insert into foreign table: let the FDW do it
1232 : */
1233 56 : rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1234 : resultRelInfo,
1235 : slots,
1236 : planSlots,
1237 : &numInserted);
1238 :
1239 344 : for (i = 0; i < numInserted; i++)
1240 : {
1241 288 : slot = rslots[i];
1242 :
1243 : /*
1244 : * AFTER ROW Triggers might reference the tableoid column, so
1245 : * (re-)initialize tts_tableOid before evaluating them.
1246 : */
1247 288 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1248 :
1249 : /* AFTER ROW INSERT Triggers */
1250 288 : ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1251 288 : mtstate->mt_transition_capture);
1252 :
1253 : /*
1254 : * Check any WITH CHECK OPTION constraints from parent views. See the
1255 : * comment in ExecInsert.
1256 : */
1257 288 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1258 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1259 : }
1260 :
1261 56 : if (canSetTag && numInserted > 0)
1262 56 : estate->es_processed += numInserted;
1263 :
1264 : /* Clean up all the slots, ready for the next batch */
1265 344 : for (i = 0; i < numSlots; i++)
1266 : {
1267 288 : ExecClearTuple(slots[i]);
1268 288 : ExecClearTuple(planSlots[i]);
1269 : }
1270 56 : resultRelInfo->ri_NumSlots = 0;
1271 56 : }
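/*
 * Whether this batching path is taken is up to the FDW. With postgres_fdw,
 * for example, it is controlled by the batch_size option (shown here on a
 * hypothetical foreign table):
 *
 *		ALTER FOREIGN TABLE ft OPTIONS (ADD batch_size '100');
 *
 * With a batch size greater than one, ExecInsert accumulates rows in
 * ri_Slots and this function flushes them through ExecForeignBatchInsert.
 */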
1272 :
1273 : /*
1274 : * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1275 : */
1276 : static void
1277 34 : ExecPendingInserts(EState *estate)
1278 : {
1279 : ListCell *l1,
1280 : *l2;
1281 :
1282 70 : forboth(l1, estate->es_insert_pending_result_relations,
1283 : l2, estate->es_insert_pending_modifytables)
1284 : {
1285 36 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1286 36 : ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1287 :
1288 : Assert(mtstate);
1289 36 : ExecBatchInsert(mtstate, resultRelInfo,
1290 : resultRelInfo->ri_Slots,
1291 : resultRelInfo->ri_PlanSlots,
1292 : resultRelInfo->ri_NumSlots,
1293 36 : estate, mtstate->canSetTag);
1294 : }
1295 :
1296 34 : list_free(estate->es_insert_pending_result_relations);
1297 34 : list_free(estate->es_insert_pending_modifytables);
1298 34 : estate->es_insert_pending_result_relations = NIL;
1299 34 : estate->es_insert_pending_modifytables = NIL;
1300 34 : }
1301 :
1302 : /*
1303 : * ExecDeletePrologue -- subroutine for ExecDelete
1304 : *
1305 : * Prepare executor state for DELETE. Actually, the only thing we have to do
1306 : * here is execute BEFORE ROW triggers. We return false if one of them makes
1307 : * the delete a no-op; otherwise, return true.
1308 : */
1309 : static bool
1310 1525670 : ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1311 : ItemPointer tupleid, HeapTuple oldtuple,
1312 : TupleTableSlot **epqreturnslot, TM_Result *result)
1313 : {
1314 1525670 : if (result)
1315 262 : *result = TM_Ok;
1316 :
1317 : /* BEFORE ROW DELETE triggers */
1318 1525670 : if (resultRelInfo->ri_TrigDesc &&
1319 6782 : resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1320 : {
1321 : /* Flush any pending inserts, so rows are visible to the triggers */
1322 352 : if (context->estate->es_insert_pending_result_relations != NIL)
1323 2 : ExecPendingInserts(context->estate);
1324 :
1325 352 : return ExecBRDeleteTriggers(context->estate, context->epqstate,
1326 : resultRelInfo, tupleid, oldtuple,
1327 : epqreturnslot, result, &context->tmfd);
1328 : }
1329 :
1330 1525318 : return true;
1331 : }
1332 :
1333 : /*
1334 : * ExecDeleteAct -- subroutine for ExecDelete
1335 : *
1336 : * Actually delete the tuple from a plain table.
1337 : *
1338 : * Caller is in charge of doing EvalPlanQual as necessary
1339 : */
1340 : static TM_Result
1341 1525512 : ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1342 : ItemPointer tupleid, bool changingPart)
1343 : {
1344 1525512 : EState *estate = context->estate;
1345 :
1346 1525512 : return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1347 : estate->es_output_cid,
1348 : estate->es_snapshot,
1349 : estate->es_crosscheck_snapshot,
1350 : true /* wait for commit */ ,
1351 : &context->tmfd,
1352 : changingPart);
1353 : }
1354 :
1355 : /*
1356 : * ExecDeleteEpilogue -- subroutine for ExecDelete
1357 : *
1358 : * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1359 : * including the UPDATE triggers if the deletion is being done as part of a
1360 : * cross-partition tuple move.
1361 : */
1362 : static void
1363 1525454 : ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1364 : ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1365 : {
1366 1525454 : ModifyTableState *mtstate = context->mtstate;
1367 1525454 : EState *estate = context->estate;
1368 : TransitionCaptureState *ar_delete_trig_tcs;
1369 :
1370 : /*
1371 : * If this delete is the result of a partition key update that moved the
1372 : * tuple to a new partition, put this row into the transition OLD TABLE,
1373 : * if there is one. We need to do this separately for DELETE and INSERT
1374 : * because they happen on different tables.
1375 : */
1376 1525454 : ar_delete_trig_tcs = mtstate->mt_transition_capture;
1377 1525454 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1378 42 : mtstate->mt_transition_capture->tcs_update_old_table)
1379 : {
1380 42 : ExecARUpdateTriggers(estate, resultRelInfo,
1381 : NULL, NULL,
1382 : tupleid, oldtuple,
1383 42 : NULL, NULL, mtstate->mt_transition_capture,
1384 : false);
1385 :
1386 : /*
1387 : * We've already captured the OLD TABLE row, so make sure any AR
1388 : * DELETE trigger fired below doesn't capture it again.
1389 : */
1390 42 : ar_delete_trig_tcs = NULL;
1391 : }
1392 :
1393 : /* AFTER ROW DELETE Triggers */
1394 1525454 : ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1395 : ar_delete_trig_tcs, changingPart);
1396 1525454 : }
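/*
 * The transition-table handling above comes into play for partition-key
 * updates, e.g. on a hypothetical table list-partitioned by region:
 *
 *		UPDATE orders SET region = 'emea' WHERE id = 42;
 *
 * If the new region value routes the row to a different partition, the row
 * is deleted from the old partition and re-inserted into the new one, yet
 * any transition tables must still show it as a single update: the OLD
 * TABLE row is captured here, the NEW TABLE row in ExecInsert, and the AR
 * DELETE/INSERT triggers are prevented from capturing it again.
 */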
1397 :
1398 : /* ----------------------------------------------------------------
1399 : * ExecDelete
1400 : *
1401 : * DELETE is like UPDATE, except that we delete the tuple and no
1402 : * index modifications are needed.
1403 : *
1404 : * When deleting from a table, tupleid identifies the tuple to
1405 : * delete and oldtuple is NULL. When deleting from a view,
1406 : * oldtuple is passed to the INSTEAD OF triggers and identifies
1407 : * what to delete, and tupleid is invalid. When deleting from a
1408 : * foreign table, tupleid is invalid; the FDW has to figure out
1409 : * which row to delete using data from the planSlot. oldtuple is
1410 : * passed to foreign table triggers; it is NULL when the foreign
1411 : * table has no relevant triggers. We use tupleDeleted to indicate
1412 : * whether the tuple is actually deleted; callers can use it to
1413 : * decide whether to continue the operation. When this DELETE is a
1414 : * part of an UPDATE of partition-key, then the slot returned by
1415 : * EvalPlanQual() is passed back using output parameter epqreturnslot.
1416 : *
1417 : * Returns RETURNING result if any, otherwise NULL.
1418 : * ----------------------------------------------------------------
1419 : */
1420 : static TupleTableSlot *
1421 1525408 : ExecDelete(ModifyTableContext *context,
1422 : ResultRelInfo *resultRelInfo,
1423 : ItemPointer tupleid,
1424 : HeapTuple oldtuple,
1425 : bool processReturning,
1426 : bool changingPart,
1427 : bool canSetTag,
1428 : bool *tupleDeleted,
1429 : TupleTableSlot **epqreturnslot)
1430 : {
1431 1525408 : EState *estate = context->estate;
1432 1525408 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1433 1525408 : TupleTableSlot *slot = NULL;
1434 : TM_Result result;
1435 :
1436 1525408 : if (tupleDeleted)
1437 874 : *tupleDeleted = false;
1438 :
1439 : /*
1440 : * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1441 : * done if it says we are.
1442 : */
1443 1525408 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1444 : epqreturnslot, NULL))
1445 28 : return NULL;
1446 :
1447 : /* INSTEAD OF ROW DELETE Triggers */
1448 1525346 : if (resultRelInfo->ri_TrigDesc &&
1449 6682 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
1450 48 : {
1451 : bool dodelete;
1452 :
1453 : Assert(oldtuple != NULL);
1454 54 : dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1455 :
1456 54 : if (!dodelete) /* "do nothing" */
1457 6 : return NULL;
1458 : }
1459 1525292 : else if (resultRelInfo->ri_FdwRoutine)
1460 : {
1461 : /*
1462 : * delete from foreign table: let the FDW do it
1463 : *
1464 : * We offer the returning slot as a place to store RETURNING data,
1465 : * although the FDW can return some other slot if it wants.
1466 : */
1467 34 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1468 34 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1469 : resultRelInfo,
1470 : slot,
1471 : context->planSlot);
1472 :
1473 34 : if (slot == NULL) /* "do nothing" */
1474 0 : return NULL;
1475 :
1476 : /*
1477 : * RETURNING expressions might reference the tableoid column, so
1478 : * (re)initialize tts_tableOid before evaluating them.
1479 : */
1480 34 : if (TTS_EMPTY(slot))
1481 6 : ExecStoreAllNullTuple(slot);
1482 :
1483 34 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1484 : }
1485 : else
1486 : {
1487 : /*
1488 : * delete the tuple
1489 : *
1490 : * Note: if context->estate->es_crosscheck_snapshot isn't
1491 : * InvalidSnapshot, we check that the row to be deleted is visible to
1492 : * that snapshot, and throw a can't-serialize error if not. This is a
1493 : * special-case behavior needed for referential integrity updates in
1494 : * transaction-snapshot mode transactions.
1495 : */
1496 1525258 : ldelete:
1497 1525262 : result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1498 :
1499 1525226 : switch (result)
1500 : {
1501 30 : case TM_SelfModified:
1502 :
1503 : /*
1504 : * The target tuple was already updated or deleted by the
1505 : * current command, or by a later command in the current
1506 : * transaction. The former case is possible in a join DELETE
1507 : * where multiple tuples join to the same target tuple. This
1508 : * is somewhat questionable, but Postgres has always allowed
1509 : * it: we just ignore additional deletion attempts.
1510 : *
1511 : * The latter case arises if the tuple is modified by a
1512 : * command in a BEFORE trigger, or perhaps by a command in a
1513 : * volatile function used in the query. In such situations we
1514 : * should not ignore the deletion, but it is equally unsafe to
1515 : * proceed. We don't want to discard the original DELETE
1516 : * while keeping the triggered actions based on its deletion;
1517 : * and it would be no better to allow the original DELETE
1518 : * while discarding updates that it triggered. The row update
1519 : * carries some information that might be important according
1520 : * to business rules; so throwing an error is the only safe
1521 : * course.
1522 : *
1523 : * If a trigger actually intends this type of interaction, it
1524 : * can re-execute the DELETE and then return NULL to cancel
1525 : * the outer delete.
1526 : */
1527 30 : if (context->tmfd.cmax != estate->es_output_cid)
1528 6 : ereport(ERROR,
1529 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1530 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1531 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1532 :
1533 : /* Else, already deleted by self; nothing to do */
1534 24 : return NULL;
1535 :
1536 1525134 : case TM_Ok:
1537 1525134 : break;
1538 :
1539 56 : case TM_Updated:
1540 : {
1541 : TupleTableSlot *inputslot;
1542 : TupleTableSlot *epqslot;
1543 :
1544 56 : if (IsolationUsesXactSnapshot())
1545 2 : ereport(ERROR,
1546 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1547 : errmsg("could not serialize access due to concurrent update")));
1548 :
1549 : /*
1550 : * Already know that we're going to need to do EPQ, so
1551 : * fetch tuple directly into the right slot.
1552 : */
1553 54 : EvalPlanQualBegin(context->epqstate);
1554 54 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1555 : resultRelInfo->ri_RangeTableIndex);
1556 :
1557 54 : result = table_tuple_lock(resultRelationDesc, tupleid,
1558 : estate->es_snapshot,
1559 : inputslot, estate->es_output_cid,
1560 : LockTupleExclusive, LockWaitBlock,
1561 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1562 : &context->tmfd);
1563 :
1564 50 : switch (result)
1565 : {
1566 44 : case TM_Ok:
1567 : Assert(context->tmfd.traversed);
1568 44 : epqslot = EvalPlanQual(context->epqstate,
1569 : resultRelationDesc,
1570 : resultRelInfo->ri_RangeTableIndex,
1571 : inputslot);
1572 44 : if (TupIsNull(epqslot))
1573 : /* Tuple not passing quals anymore, exiting... */
1574 28 : return NULL;
1575 :
1576 : /*
1577 : * If requested, skip delete and pass back the
1578 : * updated row.
1579 : */
1580 16 : if (epqreturnslot)
1581 : {
1582 12 : *epqreturnslot = epqslot;
1583 12 : return NULL;
1584 : }
1585 : else
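                 :                                         /* EPQ says the row still qualifies; retry the DELETE */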
1586 4 : goto ldelete;
1587 :
1588 4 : case TM_SelfModified:
1589 :
1590 : /*
1591 : * This can be reached when following an update
1592 : * chain from a tuple updated by another session,
1593 : * reaching a tuple that was already updated in
1594 : * this transaction. If previously updated by this
1595 : * command, ignore the delete, otherwise error
1596 : * out.
1597 : *
1598 : * See also TM_SelfModified response to
1599 : * table_tuple_delete() above.
1600 : */
1601 4 : if (context->tmfd.cmax != estate->es_output_cid)
1602 2 : ereport(ERROR,
1603 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1604 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1605 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1606 2 : return NULL;
1607 :
1608 2 : case TM_Deleted:
1609 : /* tuple already deleted; nothing to do */
1610 2 : return NULL;
1611 :
1612 0 : default:
1613 :
1614 : /*
1615 : * TM_Invisible should be impossible because we're
1616 : * waiting for updated row versions, and would
1617 : * already have errored out if the first version
1618 : * is invisible.
1619 : *
1620 : * TM_Updated should be impossible, because we're
1621 : * locking the latest version via
1622 : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
1623 : */
1624 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
1625 : result);
1626 : return NULL;
1627 : }
1628 :
1629 : Assert(false);
1630 : break;
1631 : }
1632 :
1633 6 : case TM_Deleted:
1634 6 : if (IsolationUsesXactSnapshot())
1635 0 : ereport(ERROR,
1636 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1637 : errmsg("could not serialize access due to concurrent delete")));
1638 : /* tuple already deleted; nothing to do */
1639 6 : return NULL;
1640 :
1641 0 : default:
1642 0 : elog(ERROR, "unrecognized table_tuple_delete status: %u",
1643 : result);
1644 : return NULL;
1645 : }
1646 :
1647 : /*
1648 : * Note: Normally one would think that we have to delete index tuples
1649 : * associated with the heap tuple now...
1650 : *
1651 : * ... but in POSTGRES, we have no need to do this because VACUUM will
1652 : * take care of it later. We can't delete index tuples immediately
1653 : * anyway, since the tuple is still visible to other transactions.
1654 : */
1655 : }
1656 :
1657 1525216 : if (canSetTag)
1658 1524168 : (estate->es_processed)++;
1659 :
1660 : /* Tell caller that the delete actually happened. */
1661 1525216 : if (tupleDeleted)
1662 820 : *tupleDeleted = true;
1663 :
1664 1525216 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
1665 :
1666 : /* Process RETURNING if present and if requested */
1667 1525216 : if (processReturning && resultRelInfo->ri_projectReturning)
1668 : {
1669 : /*
 1670              :          * We have to put the target tuple into a slot, which means we must
 1671              :          * first fetch it.  We can use the trigger tuple slot.
1672 : */
1673 : TupleTableSlot *rslot;
1674 :
1675 872 : if (resultRelInfo->ri_FdwRoutine)
1676 : {
1677 : /* FDW must have provided a slot containing the deleted row */
1678 : Assert(!TupIsNull(slot));
1679 : }
1680 : else
1681 : {
1682 866 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1683 866 : if (oldtuple != NULL)
1684 : {
1685 24 : ExecForceStoreHeapTuple(oldtuple, slot, false);
1686 : }
1687 : else
1688 : {
1689 842 : if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
1690 : SnapshotAny, slot))
1691 0 : elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1692 : }
1693 : }
1694 :
1695 872 : rslot = ExecProcessReturning(resultRelInfo, slot, context->planSlot);
1696 :
1697 : /*
1698 : * Before releasing the target tuple again, make sure rslot has a
1699 : * local copy of any pass-by-reference values.
1700 : */
1701 872 : ExecMaterializeSlot(rslot);
1702 :
1703 872 : ExecClearTuple(slot);
1704 :
1705 872 : return rslot;
1706 : }
1707 :
1708 1524344 : return NULL;
1709 : }
1710 :
1711 : /*
1712 : * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
1713 : *
1714 : * This works by first deleting the old tuple from the current partition,
1715 : * followed by inserting the new tuple into the root parent table, that is,
1716 : * mtstate->rootResultRelInfo. It will be re-routed from there to the
1717 : * correct partition.
1718 : *
1719 : * Returns true if the tuple has been successfully moved, or if it's found
1720 : * that the tuple was concurrently deleted so there's nothing more to do
1721 : * for the caller.
1722 : *
1723 : * False is returned if the tuple we're trying to move is found to have been
1724 : * concurrently updated. In that case, the caller must check if the updated
1725 : * tuple that's returned in *retry_slot still needs to be re-routed, and call
1726 : * this function again or perform a regular update accordingly. For MERGE,
1727 : * the updated tuple is not returned in *retry_slot; it has its own retry
1728 : * logic.
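                 :  *
                 :  * For example, given a hypothetical table partitioned by range on a
                 :  * "logdate" column, an UPDATE that changes logdate so that the row no
                 :  * longer fits its current partition's bounds ends up here: the row is
                 :  * deleted from the old partition and re-inserted via the root table.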
1729 : */
1730 : static bool
1731 916 : ExecCrossPartitionUpdate(ModifyTableContext *context,
1732 : ResultRelInfo *resultRelInfo,
1733 : ItemPointer tupleid, HeapTuple oldtuple,
1734 : TupleTableSlot *slot,
1735 : bool canSetTag,
1736 : UpdateContext *updateCxt,
1737 : TupleTableSlot **retry_slot,
1738 : TupleTableSlot **inserted_tuple,
1739 : ResultRelInfo **insert_destrel)
1740 : {
1741 916 : ModifyTableState *mtstate = context->mtstate;
1742 916 : EState *estate = mtstate->ps.state;
1743 : TupleConversionMap *tupconv_map;
1744 : bool tuple_deleted;
1745 916 : TupleTableSlot *epqslot = NULL;
1746 :
1747 916 : context->cpUpdateReturningSlot = NULL;
1748 916 : *retry_slot = NULL;
1749 :
1750 : /*
1751 : * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
1752 : * to migrate to a different partition. Maybe this can be implemented
1753 : * some day, but it seems a fringe feature with little redeeming value.
1754 : */
1755 916 : if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
1756 0 : ereport(ERROR,
1757 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1758 : errmsg("invalid ON UPDATE specification"),
1759 : errdetail("The result tuple would appear in a different partition than the original tuple.")));
1760 :
1761 : /*
1762 : * When an UPDATE is run directly on a leaf partition, simply fail with a
1763 : * partition constraint violation error.
1764 : */
1765 916 : if (resultRelInfo == mtstate->rootResultRelInfo)
1766 42 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1767 :
1768 : /* Initialize tuple routing info if not already done. */
1769 874 : if (mtstate->mt_partition_tuple_routing == NULL)
1770 : {
1771 584 : Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
1772 : MemoryContext oldcxt;
1773 :
1774 : /* Things built here have to last for the query duration. */
1775 584 : oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1776 :
1777 584 : mtstate->mt_partition_tuple_routing =
1778 584 : ExecSetupPartitionTupleRouting(estate, rootRel);
1779 :
1780 : /*
1781 : * Before a partition's tuple can be re-routed, it must first be
1782 : * converted to the root's format, so we'll need a slot for storing
1783 : * such tuples.
1784 : */
1785 : Assert(mtstate->mt_root_tuple_slot == NULL);
1786 584 : mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
1787 :
1788 584 : MemoryContextSwitchTo(oldcxt);
1789 : }
1790 :
1791 : /*
1792 : * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
1793 : * We want to return rows from INSERT.
1794 : */
1795 874 : ExecDelete(context, resultRelInfo,
1796 : tupleid, oldtuple,
1797 : false, /* processReturning */
1798 : true, /* changingPart */
1799 : false, /* canSetTag */
1800 : &tuple_deleted, &epqslot);
1801 :
1802 : /*
 1803              :      * If for some reason the DELETE didn't happen (e.g. a trigger prevented it, or
1804 : * it was already deleted by self, or it was concurrently deleted by
1805 : * another transaction), then we should skip the insert as well;
1806 : * otherwise, an UPDATE could cause an increase in the total number of
1807 : * rows across all partitions, which is clearly wrong.
1808 : *
1809 : * For a normal UPDATE, the case where the tuple has been the subject of a
1810 : * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
1811 : * machinery, but for an UPDATE that we've translated into a DELETE from
1812 : * this partition and an INSERT into some other partition, that's not
1813 : * available, because CTID chains can't span relation boundaries. We
1814 : * mimic the semantics to a limited extent by skipping the INSERT if the
1815 : * DELETE fails to find a tuple. This ensures that two concurrent
1816 : * attempts to UPDATE the same tuple at the same time can't turn one tuple
1817 : * into two, and that an UPDATE of a just-deleted tuple can't resurrect
1818 : * it.
1819 : */
1820 872 : if (!tuple_deleted)
1821 : {
1822 : /*
 1823              :          * epqslot will typically be NULL.  But when ExecDelete() finds that
1824 : * another transaction has concurrently updated the same row, it
1825 : * re-fetches the row, skips the delete, and epqslot is set to the
1826 : * re-fetched tuple slot. In that case, we need to do all the checks
1827 : * again. For MERGE, we leave everything to the caller (it must do
1828 : * additional rechecking, and might end up executing a different
1829 : * action entirely).
1830 : */
1831 52 : if (context->relaction != NULL)
1832 10 : return false;
1833 42 : else if (TupIsNull(epqslot))
1834 36 : return true;
1835 : else
1836 : {
1837 : /* Fetch the most recent version of old tuple. */
1838 : TupleTableSlot *oldSlot;
1839 :
1840 : /* ... but first, make sure ri_oldTupleSlot is initialized. */
1841 6 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
1842 0 : ExecInitUpdateProjection(mtstate, resultRelInfo);
1843 6 : oldSlot = resultRelInfo->ri_oldTupleSlot;
1844 6 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
1845 : tupleid,
1846 : SnapshotAny,
1847 : oldSlot))
1848 0 : elog(ERROR, "failed to fetch tuple being updated");
1849 : /* and project the new tuple to retry the UPDATE with */
1850 6 : *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
1851 : oldSlot);
1852 6 : return false;
1853 : }
1854 : }
1855 :
1856 : /*
1857 : * resultRelInfo is one of the per-relation resultRelInfos. So we should
 1858              :      * convert the tuple into the root's tuple descriptor if needed, since
 1859              :      * ExecInsert() starts its search from the root.
1860 : */
1861 820 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1862 820 : if (tupconv_map != NULL)
1863 284 : slot = execute_attr_map_slot(tupconv_map->attrMap,
1864 : slot,
1865 : mtstate->mt_root_tuple_slot);
1866 :
1867 : /* Tuple routing starts from the root table. */
1868 692 : context->cpUpdateReturningSlot =
1869 820 : ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
1870 : inserted_tuple, insert_destrel);
1871 :
1872 : /*
 1873              :      * Reset the transition state that may have been written by
1874 : * INSERT.
1875 : */
1876 692 : if (mtstate->mt_transition_capture)
1877 42 : mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
1878 :
1879 : /* We're done moving. */
1880 692 : return true;
1881 : }
1882 :
1883 : /*
1884 : * ExecUpdatePrologue -- subroutine for ExecUpdate
1885 : *
1886 : * Prepare executor state for UPDATE. This includes running BEFORE ROW
1887 : * triggers. We return false if one of them makes the update a no-op;
1888 : * otherwise, return true.
1889 : */
1890 : static bool
1891 309612 : ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1892 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
1893 : TM_Result *result)
1894 : {
1895 309612 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1896 :
1897 309612 : if (result)
1898 1536 : *result = TM_Ok;
1899 :
1900 309612 : ExecMaterializeSlot(slot);
1901 :
1902 : /*
1903 : * Open the table's indexes, if we have not done so already, so that we
1904 : * can add new index entries for the updated tuple.
1905 : */
1906 309612 : if (resultRelationDesc->rd_rel->relhasindex &&
1907 219852 : resultRelInfo->ri_IndexRelationDescs == NULL)
1908 7912 : ExecOpenIndices(resultRelInfo, false);
1909 :
1910 : /* BEFORE ROW UPDATE triggers */
1911 309612 : if (resultRelInfo->ri_TrigDesc &&
1912 5702 : resultRelInfo->ri_TrigDesc->trig_update_before_row)
1913 : {
1914 : /* Flush any pending inserts, so rows are visible to the triggers */
1915 2542 : if (context->estate->es_insert_pending_result_relations != NIL)
1916 2 : ExecPendingInserts(context->estate);
1917 :
1918 2542 : return ExecBRUpdateTriggers(context->estate, context->epqstate,
1919 : resultRelInfo, tupleid, oldtuple, slot,
1920 : result, &context->tmfd);
1921 : }
1922 :
1923 307070 : return true;
1924 : }
1925 :
1926 : /*
1927 : * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
1928 : *
1929 : * Apply the final modifications to the tuple slot before the update.
1930 : * (This is split out because we also need it in the foreign-table code path.)
1931 : */
1932 : static void
1933 309408 : ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
1934 : TupleTableSlot *slot,
1935 : EState *estate)
1936 : {
1937 309408 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1938 :
1939 : /*
1940 : * Constraints and GENERATED expressions might reference the tableoid
1941 : * column, so (re-)initialize tts_tableOid before evaluating them.
1942 : */
1943 309408 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1944 :
1945 : /*
1946 : * Compute stored generated columns
1947 : */
1948 309408 : if (resultRelationDesc->rd_att->constr &&
1949 182916 : resultRelationDesc->rd_att->constr->has_generated_stored)
1950 258 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
1951 : CMD_UPDATE);
1952 309408 : }
1953 :
1954 : /*
1955 : * ExecUpdateAct -- subroutine for ExecUpdate
1956 : *
1957 : * Actually update the tuple, when operating on a plain table. If the
1958 : * table is a partition, and the command was called referencing an ancestor
1959 : * partitioned table, this routine migrates the resulting tuple to another
1960 : * partition.
1961 : *
1962 : * The caller is in charge of keeping indexes current as necessary. The
1963 : * caller is also in charge of doing EvalPlanQual if the tuple is found to
1964 : * be concurrently updated. However, in case of a cross-partition update,
1965 : * this routine does it.
1966 : */
1967 : static TM_Result
1968 309260 : ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1969 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
1970 : bool canSetTag, UpdateContext *updateCxt)
1971 : {
1972 309260 : EState *estate = context->estate;
1973 309260 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1974 : bool partition_constraint_failed;
1975 : TM_Result result;
1976 :
1977 309260 : updateCxt->crossPartUpdate = false;
1978 :
1979 : /*
1980 : * If we move the tuple to a new partition, we loop back here to recompute
1981 : * GENERATED values (which are allowed to be different across partitions)
1982 : * and recheck any RLS policies and constraints. We do not fire any
1983 : * BEFORE triggers of the new partition, however.
1984 : */
1985 309266 : lreplace:
1986 : /* Fill in GENERATEd columns */
1987 309266 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
1988 :
1989 : /* ensure slot is independent, consider e.g. EPQ */
1990 309266 : ExecMaterializeSlot(slot);
1991 :
1992 : /*
1993 : * If partition constraint fails, this row might get moved to another
1994 : * partition, in which case we should check the RLS CHECK policy just
1995 : * before inserting into the new partition, rather than doing it here.
1996 : * This is because a trigger on that partition might again change the row.
1997 : * So skip the WCO checks if the partition constraint fails.
1998 : */
1999 309266 : partition_constraint_failed =
2000 311544 : resultRelationDesc->rd_rel->relispartition &&
2001 2278 : !ExecPartitionCheck(resultRelInfo, slot, estate, false);
2002 :
2003 : /* Check any RLS UPDATE WITH CHECK policies */
2004 309266 : if (!partition_constraint_failed &&
2005 308350 : resultRelInfo->ri_WithCheckOptions != NIL)
2006 : {
2007 : /*
2008 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
2009 : * we are looking for at this point.
2010 : */
2011 462 : ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
2012 : resultRelInfo, slot, estate);
2013 : }
2014 :
2015 : /*
2016 : * If a partition check failed, try to move the row into the right
2017 : * partition.
2018 : */
2019 309212 : if (partition_constraint_failed)
2020 : {
2021 : TupleTableSlot *inserted_tuple,
2022 : *retry_slot;
2023 916 : ResultRelInfo *insert_destrel = NULL;
2024 :
2025 : /*
2026 : * ExecCrossPartitionUpdate will first DELETE the row from the
2027 : * partition it's currently in and then insert it back into the root
2028 : * table, which will re-route it to the correct partition. However,
2029 : * if the tuple has been concurrently updated, a retry is needed.
2030 : */
2031 916 : if (ExecCrossPartitionUpdate(context, resultRelInfo,
2032 : tupleid, oldtuple, slot,
2033 : canSetTag, updateCxt,
2034 : &retry_slot,
2035 : &inserted_tuple,
2036 : &insert_destrel))
2037 : {
2038 : /* success! */
2039 728 : updateCxt->updated = true;
2040 728 : updateCxt->crossPartUpdate = true;
2041 :
2042 : /*
2043 : * If the partitioned table being updated is referenced in foreign
2044 : * keys, queue up trigger events to check that none of them were
2045 : * violated. No special treatment is needed in
2046 : * non-cross-partition update situations, because the leaf
2047 : * partition's AR update triggers will take care of that. During
2048 : * cross-partition updates implemented as delete on the source
2049 : * partition followed by insert on the destination partition,
2050 : * AR-UPDATE triggers of the root table (that is, the table
2051 : * mentioned in the query) must be fired.
2052 : *
2053 : * NULL insert_destrel means that the move failed to occur, that
 2054              :              * is, the update failed, so there is no need to do anything in that case.
2055 : */
2056 728 : if (insert_destrel &&
2057 688 : resultRelInfo->ri_TrigDesc &&
2058 314 : resultRelInfo->ri_TrigDesc->trig_update_after_row)
2059 252 : ExecCrossPartitionUpdateForeignKey(context,
2060 : resultRelInfo,
2061 : insert_destrel,
2062 : tupleid, slot,
2063 : inserted_tuple);
2064 :
2065 732 : return TM_Ok;
2066 : }
2067 :
2068 : /*
2069 : * No luck, a retry is needed. If running MERGE, we do not do so
 2070              :          * here; instead we let MERGE handle the retry under its own rules.
2071 : */
2072 16 : if (context->relaction != NULL)
2073 10 : return TM_Updated;
2074 :
2075 : /*
2076 : * ExecCrossPartitionUpdate installed an updated version of the new
2077 : * tuple in the retry slot; start over.
2078 : */
2079 6 : slot = retry_slot;
2080 6 : goto lreplace;
2081 : }
2082 :
2083 : /*
2084 : * Check the constraints of the tuple. We've already checked the
2085 : * partition constraint above; however, we must still ensure the tuple
2086 : * passes all other constraints, so we will call ExecConstraints() and
2087 : * have it validate all remaining checks.
2088 : */
2089 308296 : if (resultRelationDesc->rd_att->constr)
2090 182392 : ExecConstraints(resultRelInfo, slot, estate);
2091 :
2092 : /*
2093 : * replace the heap tuple
2094 : *
2095 : * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
2096 : * the row to be updated is visible to that snapshot, and throw a
2097 : * can't-serialize error if not. This is a special-case behavior needed
2098 : * for referential integrity updates in transaction-snapshot mode
2099 : * transactions.
2100 : */
2101 308246 : result = table_tuple_update(resultRelationDesc, tupleid, slot,
2102 : estate->es_output_cid,
2103 : estate->es_snapshot,
2104 : estate->es_crosscheck_snapshot,
2105 : true /* wait for commit */ ,
2106 : &context->tmfd, &updateCxt->lockmode,
2107 : &updateCxt->updateIndexes);
2108 308222 : if (result == TM_Ok)
2109 307940 : updateCxt->updated = true;
2110 :
2111 308222 : return result;
2112 : }
2113 :
2114 : /*
2115 : * ExecUpdateEpilogue -- subroutine for ExecUpdate
2116 : *
2117 : * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2118 : * returns indicating that the tuple was updated.
2119 : */
2120 : static void
2121 308170 : ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
2122 : ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2123 : HeapTuple oldtuple, TupleTableSlot *slot)
2124 : {
2125 308170 : ModifyTableState *mtstate = context->mtstate;
2126 308170 : List *recheckIndexes = NIL;
2127 :
2128 : /* insert index entries for tuple if necessary */
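                 :     /*
                 :      * TU_Summarizing means only summarizing (e.g. BRIN) indexes need new
                 :      * entries for this update; it is passed through below as the
                 :      * "onlySummarizing" argument of ExecInsertIndexTuples().
                 :      */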
2129 308170 : if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
2130 166128 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
2131 : slot, context->estate,
2132 : true, false,
2133 : NULL, NIL,
2134 166128 : (updateCxt->updateIndexes == TU_Summarizing));
2135 :
2136 : /* AFTER ROW UPDATE Triggers */
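                 :     /*
                 :      * (When the ModifyTable operation is INSERT, this update came from ON
                 :      * CONFLICT DO UPDATE, so the ON CONFLICT transition capture state is
                 :      * the one to pass.)
                 :      */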
2137 308152 : ExecARUpdateTriggers(context->estate, resultRelInfo,
2138 : NULL, NULL,
2139 : tupleid, oldtuple, slot,
2140 : recheckIndexes,
2141 308152 : mtstate->operation == CMD_INSERT ?
2142 : mtstate->mt_oc_transition_capture :
2143 : mtstate->mt_transition_capture,
2144 : false);
2145 :
2146 308152 : list_free(recheckIndexes);
2147 :
2148 : /*
2149 : * Check any WITH CHECK OPTION constraints from parent views. We are
2150 : * required to do this after testing all constraints and uniqueness
2151 : * violations per the SQL spec, so we do it after actually updating the
2152 : * record in the heap and all indexes.
2153 : *
2154 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2155 : * are looking for at this point.
2156 : */
2157 308152 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2158 430 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2159 : slot, context->estate);
2160 308088 : }
2161 :
2162 : /*
2163 : * Queues up an update event using the target root partitioned table's
2164 : * trigger to check that a cross-partition update hasn't broken any foreign
2165 : * keys pointing into it.
2166 : */
2167 : static void
2168 252 : ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
2169 : ResultRelInfo *sourcePartInfo,
2170 : ResultRelInfo *destPartInfo,
2171 : ItemPointer tupleid,
2172 : TupleTableSlot *oldslot,
2173 : TupleTableSlot *newslot)
2174 : {
2175 : ListCell *lc;
2176 : ResultRelInfo *rootRelInfo;
2177 : List *ancestorRels;
2178 :
2179 252 : rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2180 252 : ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2181 :
2182 : /*
 2183              :      * For any foreign keys that point directly into a non-root ancestor of
2184 : * the source partition, we can in theory fire an update event to enforce
2185 : * those constraints using their triggers, if we could tell that both the
2186 : * source and the destination partitions are under the same ancestor. But
2187 : * for now, we simply report an error that those cannot be enforced.
2188 : */
2189 558 : foreach(lc, ancestorRels)
2190 : {
2191 312 : ResultRelInfo *rInfo = lfirst(lc);
2192 312 : TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2193 312 : bool has_noncloned_fkey = false;
2194 :
2195 : /* Root ancestor's triggers will be processed. */
2196 312 : if (rInfo == rootRelInfo)
2197 246 : continue;
2198 :
2199 66 : if (trigdesc && trigdesc->trig_update_after_row)
2200 : {
2201 228 : for (int i = 0; i < trigdesc->numtriggers; i++)
2202 : {
2203 168 : Trigger *trig = &trigdesc->triggers[i];
2204 :
2205 174 : if (!trig->tgisclone &&
2206 6 : RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
2207 : {
2208 6 : has_noncloned_fkey = true;
2209 6 : break;
2210 : }
2211 : }
2212 : }
2213 :
2214 66 : if (has_noncloned_fkey)
2215 6 : ereport(ERROR,
2216 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2217 : errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2218 : errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2219 : RelationGetRelationName(rInfo->ri_RelationDesc),
2220 : RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
2221 : errhint("Consider defining the foreign key on table \"%s\".",
2222 : RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
2223 : }
2224 :
2225 : /* Perform the root table's triggers. */
2226 246 : ExecARUpdateTriggers(context->estate,
2227 : rootRelInfo, sourcePartInfo, destPartInfo,
2228 : tupleid, NULL, newslot, NIL, NULL, true);
2229 246 : }
2230 :
2231 : /* ----------------------------------------------------------------
2232 : * ExecUpdate
2233 : *
2234 : * note: we can't run UPDATE queries with transactions
2235 : * off because UPDATEs are actually INSERTs and our
2236 : * scan will mistakenly loop forever, updating the tuple
 2237              :  *      it just inserted.  This should be fixed, but until it
 2238              :  *      is, we don't want to get stuck in an infinite loop
 2239              :  *      that corrupts the database.
2240 : *
2241 : * When updating a table, tupleid identifies the tuple to
2242 : * update and oldtuple is NULL. When updating a view, oldtuple
2243 : * is passed to the INSTEAD OF triggers and identifies what to
2244 : * update, and tupleid is invalid. When updating a foreign table,
2245 : * tupleid is invalid; the FDW has to figure out which row to
2246 : * update using data from the planSlot. oldtuple is passed to
2247 : * foreign table triggers; it is NULL when the foreign table has
2248 : * no relevant triggers.
2249 : *
2250 : * slot contains the new tuple value to be stored.
2251 : * planSlot is the output of the ModifyTable's subplan; we use it
2252 : * to access values from other input tables (for RETURNING),
2253 : * row-ID junk columns, etc.
2254 : *
2255 : * Returns RETURNING result if any, otherwise NULL.
2256 : * ----------------------------------------------------------------
2257 : */
2258 : static TupleTableSlot *
2259 308076 : ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2260 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2261 : bool canSetTag)
2262 : {
2263 308076 : EState *estate = context->estate;
2264 308076 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2265 308076 : UpdateContext updateCxt = {0};
2266 : TM_Result result;
2267 :
2268 : /*
2269 : * abort the operation if not running transactions
2270 : */
2271 308076 : if (IsBootstrapProcessingMode())
2272 0 : elog(ERROR, "cannot UPDATE during bootstrap");
2273 :
2274 : /*
2275 : * Prepare for the update. This includes BEFORE ROW triggers, so we're
2276 : * done if it says we are.
2277 : */
2278 308076 : if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
2279 138 : return NULL;
2280 :
2281 : /* INSTEAD OF ROW UPDATE Triggers */
2282 307902 : if (resultRelInfo->ri_TrigDesc &&
2283 5368 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2284 : {
2285 114 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2286 : oldtuple, slot))
2287 18 : return NULL; /* "do nothing" */
2288 : }
2289 307788 : else if (resultRelInfo->ri_FdwRoutine)
2290 : {
2291 : /* Fill in GENERATEd columns */
2292 142 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2293 :
2294 : /*
2295 : * update in foreign table: let the FDW do it
2296 : */
2297 142 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2298 : resultRelInfo,
2299 : slot,
2300 : context->planSlot);
2301 :
2302 142 : if (slot == NULL) /* "do nothing" */
2303 2 : return NULL;
2304 :
2305 : /*
2306 : * AFTER ROW Triggers or RETURNING expressions might reference the
2307 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
2308 : * them. (This covers the case where the FDW replaced the slot.)
2309 : */
2310 140 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2311 : }
2312 : else
2313 : {
2314 : /*
2315 : * If we generate a new candidate tuple after EvalPlanQual testing, we
2316 : * must loop back here to try again. (We don't need to redo triggers,
2317 : * however. If there are any BEFORE triggers then trigger.c will have
2318 : * done table_tuple_lock to lock the correct tuple, so there's no need
2319 : * to do them again.)
2320 : */
2321 307646 : redo_act:
2322 307742 : result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2323 : canSetTag, &updateCxt);
2324 :
2325 : /*
2326 : * If ExecUpdateAct reports that a cross-partition update was done,
2327 : * then the RETURNING tuple (if any) has been projected and there's
2328 : * nothing else for us to do.
2329 : */
2330 307456 : if (updateCxt.crossPartUpdate)
2331 660 : return context->cpUpdateReturningSlot;
2332 :
2333 306796 : switch (result)
2334 : {
2335 84 : case TM_SelfModified:
2336 :
2337 : /*
2338 : * The target tuple was already updated or deleted by the
2339 : * current command, or by a later command in the current
2340 : * transaction. The former case is possible in a join UPDATE
2341 : * where multiple tuples join to the same target tuple. This
2342 : * is pretty questionable, but Postgres has always allowed it:
2343 : * we just execute the first update action and ignore
2344 : * additional update attempts.
2345 : *
2346 : * The latter case arises if the tuple is modified by a
2347 : * command in a BEFORE trigger, or perhaps by a command in a
2348 : * volatile function used in the query. In such situations we
2349 : * should not ignore the update, but it is equally unsafe to
2350 : * proceed. We don't want to discard the original UPDATE
2351 : * while keeping the triggered actions based on it; and we
2352 : * have no principled way to merge this update with the
2353 : * previous ones. So throwing an error is the only safe
2354 : * course.
2355 : *
2356 : * If a trigger actually intends this type of interaction, it
2357 : * can re-execute the UPDATE (assuming it can figure out how)
2358 : * and then return NULL to cancel the outer update.
2359 : */
2360 84 : if (context->tmfd.cmax != estate->es_output_cid)
2361 6 : ereport(ERROR,
2362 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2363 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2364 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2365 :
2366 : /* Else, already updated by self; nothing to do */
2367 78 : return NULL;
2368 :
2369 306556 : case TM_Ok:
2370 306556 : break;
2371 :
2372 148 : case TM_Updated:
2373 : {
2374 : TupleTableSlot *inputslot;
2375 : TupleTableSlot *epqslot;
2376 : TupleTableSlot *oldSlot;
2377 :
2378 148 : if (IsolationUsesXactSnapshot())
2379 4 : ereport(ERROR,
2380 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2381 : errmsg("could not serialize access due to concurrent update")));
2382 :
2383 : /*
2384 : * Already know that we're going to need to do EPQ, so
2385 : * fetch tuple directly into the right slot.
2386 : */
2387 144 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2388 : resultRelInfo->ri_RangeTableIndex);
2389 :
2390 144 : result = table_tuple_lock(resultRelationDesc, tupleid,
2391 : estate->es_snapshot,
2392 : inputslot, estate->es_output_cid,
2393 : updateCxt.lockmode, LockWaitBlock,
2394 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2395 : &context->tmfd);
2396 :
2397 140 : switch (result)
2398 : {
2399 130 : case TM_Ok:
2400 : Assert(context->tmfd.traversed);
2401 :
2402 130 : epqslot = EvalPlanQual(context->epqstate,
2403 : resultRelationDesc,
2404 : resultRelInfo->ri_RangeTableIndex,
2405 : inputslot);
2406 130 : if (TupIsNull(epqslot))
2407 : /* Tuple not passing quals anymore, exiting... */
2408 34 : return NULL;
2409 :
2410 : /* Make sure ri_oldTupleSlot is initialized. */
2411 96 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2412 0 : ExecInitUpdateProjection(context->mtstate,
2413 : resultRelInfo);
2414 :
2415 : /* Fetch the most recent version of old tuple. */
2416 96 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2417 96 : if (!table_tuple_fetch_row_version(resultRelationDesc,
2418 : tupleid,
2419 : SnapshotAny,
2420 : oldSlot))
2421 0 : elog(ERROR, "failed to fetch tuple being updated");
2422 96 : slot = ExecGetUpdateNewTuple(resultRelInfo,
2423 : epqslot, oldSlot);
2424 96 : goto redo_act;
2425 :
2426 2 : case TM_Deleted:
2427 : /* tuple already deleted; nothing to do */
2428 2 : return NULL;
2429 :
2430 8 : case TM_SelfModified:
2431 :
2432 : /*
2433 : * This can be reached when following an update
2434 : * chain from a tuple updated by another session,
2435 : * reaching a tuple that was already updated in
2436 : * this transaction. If previously modified by
2437 : * this command, ignore the redundant update,
2438 : * otherwise error out.
2439 : *
2440 : * See also TM_SelfModified response to
2441 : * table_tuple_update() above.
2442 : */
2443 8 : if (context->tmfd.cmax != estate->es_output_cid)
2444 2 : ereport(ERROR,
2445 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2446 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2447 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2448 6 : return NULL;
2449 :
2450 0 : default:
2451 : /* see table_tuple_lock call in ExecDelete() */
2452 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2453 : result);
2454 : return NULL;
2455 : }
2456 : }
2457 :
2458 : break;
2459 :
2460 8 : case TM_Deleted:
2461 8 : if (IsolationUsesXactSnapshot())
2462 0 : ereport(ERROR,
2463 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2464 : errmsg("could not serialize access due to concurrent delete")));
2465 : /* tuple already deleted; nothing to do */
2466 8 : return NULL;
2467 :
2468 0 : default:
2469 0 : elog(ERROR, "unrecognized table_tuple_update status: %u",
2470 : result);
2471 : return NULL;
2472 : }
2473 : }
2474 :
2475 306786 : if (canSetTag)
2476 306194 : (estate->es_processed)++;
2477 :
2478 306786 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2479 : slot);
2480 :
2481 : /* Process RETURNING if present */
2482 306704 : if (resultRelInfo->ri_projectReturning)
2483 2140 : return ExecProcessReturning(resultRelInfo, slot, context->planSlot);
2484 :
2485 304564 : return NULL;
2486 : }
2487 :
2488 : /*
2489 : * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
2490 : *
2491 : * Try to lock tuple for update as part of speculative insertion. If
2492 : * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
2493 : * (but still lock row, even though it may not satisfy estate's
2494 : * snapshot).
2495 : *
2496 : * Returns true if we're done (with or without an update), or false if
2497 : * the caller must retry the INSERT from scratch.
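                 :  *
                 :  * A hypothetical statement that reaches this code might look like:
                 :  *     INSERT INTO t (k, v) VALUES (1, 'x')
                 :  *         ON CONFLICT (k) DO UPDATE SET v = EXCLUDED.v
                 :  *         WHERE t.v IS DISTINCT FROM EXCLUDED.v;
                 :  * The WHERE clause becomes the oc_WhereClause qual tested below, and
                 :  * EXCLUDED refers to the tuple proposed for insertion (excludedSlot).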
2498 : */
2499 : static bool
2500 5200 : ExecOnConflictUpdate(ModifyTableContext *context,
2501 : ResultRelInfo *resultRelInfo,
2502 : ItemPointer conflictTid,
2503 : TupleTableSlot *excludedSlot,
2504 : bool canSetTag,
2505 : TupleTableSlot **returning)
2506 : {
2507 5200 : ModifyTableState *mtstate = context->mtstate;
2508 5200 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2509 5200 : Relation relation = resultRelInfo->ri_RelationDesc;
2510 5200 : ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
2511 5200 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
2512 : TM_FailureData tmfd;
2513 : LockTupleMode lockmode;
2514 : TM_Result test;
2515 : Datum xminDatum;
2516 : TransactionId xmin;
2517 : bool isnull;
2518 :
2519 : /* Determine lock mode to use */
2520 5200 : lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
2521 :
2522 : /*
2523 : * Lock tuple for update. Don't follow updates when tuple cannot be
2524 : * locked without doing so. A row locking conflict here means our
2525 : * previous conclusion that the tuple is conclusively committed is not
2526 : * true anymore.
2527 : */
2528 5200 : test = table_tuple_lock(relation, conflictTid,
2529 5200 : context->estate->es_snapshot,
2530 5200 : existing, context->estate->es_output_cid,
2531 : lockmode, LockWaitBlock, 0,
2532 : &tmfd);
2533 5200 : switch (test)
2534 : {
2535 5176 : case TM_Ok:
2536 : /* success! */
2537 5176 : break;
2538 :
2539 24 : case TM_Invisible:
2540 :
2541 : /*
2542 : * This can occur when a just inserted tuple is updated again in
2543 : * the same command. E.g. because multiple rows with the same
2544 : * conflicting key values are inserted.
2545 : *
2546 : * This is somewhat similar to the ExecUpdate() TM_SelfModified
2547 : * case. We do not want to proceed because it would lead to the
2548 : * same row being updated a second time in some unspecified order,
2549 : * and in contrast to plain UPDATEs there's no historical behavior
2550 : * to break.
2551 : *
2552 : * It is the user's responsibility to prevent this situation from
2553 : * occurring. These problems are why the SQL standard similarly
2554 : * specifies that for SQL MERGE, an exception must be raised in
2555 : * the event of an attempt to update the same row twice.
2556 : */
2557 24 : xminDatum = slot_getsysattr(existing,
2558 : MinTransactionIdAttributeNumber,
2559 : &isnull);
2560 : Assert(!isnull);
2561 24 : xmin = DatumGetTransactionId(xminDatum);
2562 :
2563 24 : if (TransactionIdIsCurrentTransactionId(xmin))
2564 24 : ereport(ERROR,
2565 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2566 : /* translator: %s is a SQL command name */
2567 : errmsg("%s command cannot affect row a second time",
2568 : "ON CONFLICT DO UPDATE"),
2569 : errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
2570 :
2571 : /* This shouldn't happen */
2572 0 : elog(ERROR, "attempted to lock invisible tuple");
2573 : break;
2574 :
2575 0 : case TM_SelfModified:
2576 :
2577 : /*
2578 : * This state should never be reached. As a dirty snapshot is used
2579 : * to find conflicting tuples, speculative insertion wouldn't have
2580 : * seen this row to conflict with.
2581 : */
2582 0 : elog(ERROR, "unexpected self-updated tuple");
2583 : break;
2584 :
2585 0 : case TM_Updated:
2586 0 : if (IsolationUsesXactSnapshot())
2587 0 : ereport(ERROR,
2588 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2589 : errmsg("could not serialize access due to concurrent update")));
2590 :
2591 : /*
2592 : * As long as we don't support an UPDATE of INSERT ON CONFLICT for
 2593              :              * a partitioned table, we shouldn't reach a case where the tuple
 2594              :              * to be locked is moved to another partition due to a concurrent
 2595              :              * update of the partition key.
2596 : */
2597 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2598 :
2599 : /*
2600 : * Tell caller to try again from the very start.
2601 : *
2602 : * It does not make sense to use the usual EvalPlanQual() style
2603 : * loop here, as the new version of the row might not conflict
2604 : * anymore, or the conflicting tuple has actually been deleted.
2605 : */
2606 0 : ExecClearTuple(existing);
2607 0 : return false;
2608 :
2609 0 : case TM_Deleted:
2610 0 : if (IsolationUsesXactSnapshot())
2611 0 : ereport(ERROR,
2612 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2613 : errmsg("could not serialize access due to concurrent delete")));
2614 :
2615 : /* see TM_Updated case */
2616 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2617 0 : ExecClearTuple(existing);
2618 0 : return false;
2619 :
2620 0 : default:
2621 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
2622 : }
2623 :
2624 : /* Success, the tuple is locked. */
2625 :
2626 : /*
2627 : * Verify that the tuple is visible to our MVCC snapshot if the current
2628 : * isolation level mandates that.
2629 : *
2630 : * It's not sufficient to rely on the check within ExecUpdate() as e.g.
2631 : * CONFLICT ... WHERE clause may prevent us from reaching that.
2632 : *
2633 : * This means we only ever continue when a new command in the current
2634 : * transaction could see the row, even though in READ COMMITTED mode the
2635 : * tuple will not be visible according to the current statement's
2636 : * snapshot. This is in line with the way UPDATE deals with newer tuple
2637 : * versions.
2638 : */
2639 5176 : ExecCheckTupleVisible(context->estate, relation, existing);
2640 :
2641 : /*
2642 : * Make tuple and any needed join variables available to ExecQual and
2643 : * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
2644 : * the target's existing tuple is installed in the scantuple. EXCLUDED
2645 : * has been made to reference INNER_VAR in setrefs.c, but there is no
2646 : * other redirection.
2647 : */
2648 5176 : econtext->ecxt_scantuple = existing;
2649 5176 : econtext->ecxt_innertuple = excludedSlot;
2650 5176 : econtext->ecxt_outertuple = NULL;
2651 :
2652 5176 : if (!ExecQual(onConflictSetWhere, econtext))
2653 : {
2654 32 : ExecClearTuple(existing); /* see return below */
2655 32 : InstrCountFiltered1(&mtstate->ps, 1);
2656 32 : return true; /* done with the tuple */
2657 : }
2658 :
2659 5144 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2660 : {
2661 : /*
2662 : * Check target's existing tuple against UPDATE-applicable USING
2663 : * security barrier quals (if any), enforced here as RLS checks/WCOs.
2664 : *
2665 : * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
2666 : * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
2667 : * but that's almost the extent of its special handling for ON
2668 : * CONFLICT DO UPDATE.
2669 : *
2670 : * The rewriter will also have associated UPDATE applicable straight
2671 : * RLS checks/WCOs for the benefit of the ExecUpdate() call that
2672 : * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
2673 : * kinds, so there is no danger of spurious over-enforcement in the
2674 : * INSERT or UPDATE path.
2675 : */
2676 60 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
2677 : existing,
2678 : mtstate->ps.state);
2679 : }
2680 :
2681 : /* Project the new tuple version */
2682 5120 : ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
2683 :
2684 : /*
2685 : * Note that it is possible that the target tuple has been modified in
2686 : * this session, after the above table_tuple_lock. We choose to not error
2687 : * out in that case, in line with ExecUpdate's treatment of similar cases.
2688 : * This can happen if an UPDATE is triggered from within ExecQual(),
2689 : * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
2690 : * wCTE in the ON CONFLICT's SET.
2691 : */
2692 :
2693 : /* Execute UPDATE with projection */
2694 10210 : *returning = ExecUpdate(context, resultRelInfo,
2695 : conflictTid, NULL,
2696 5120 : resultRelInfo->ri_onConflict->oc_ProjSlot,
2697 : canSetTag);
2698 :
2699 : /*
2700 : * Clear out existing tuple, as there might not be another conflict among
2701 : * the next input rows. Don't want to hold resources till the end of the
2702 : * query.
2703 : */
2704 5090 : ExecClearTuple(existing);
2705 5090 : return true;
2706 : }
2707 :
2708 : /*
2709 : * Perform MERGE.
2710 : */
2711 : static TupleTableSlot *
2712 5120 : ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2713 : ItemPointer tupleid, bool canSetTag)
2714 : {
2715 : bool matched;
2716 :
2717 : /*-----
2718 : * If we are dealing with a WHEN MATCHED case (tupleid is valid), we
2719 : * execute the first action for which the additional WHEN MATCHED AND
2720 : * quals pass. If an action without quals is found, that action is
2721 : * executed.
2722 : *
2723 : * Similarly, if we are dealing with WHEN NOT MATCHED case, we look at
2724 : * the given WHEN NOT MATCHED actions in sequence until one passes.
2725 : *
2726 : * Things get interesting in case of concurrent update/delete of the
2727 : * target tuple. Such concurrent update/delete is detected while we are
2728 : * executing a WHEN MATCHED action.
2729 : *
2730 : * A concurrent update can:
2731 : *
2732 : * 1. modify the target tuple so that it no longer satisfies the
2733 : * additional quals attached to the current WHEN MATCHED action
2734 : *
2735 : * In this case, we are still dealing with a WHEN MATCHED case.
2736 : * We recheck the list of WHEN MATCHED actions from the start and
2737 : * choose the first one that satisfies the new target tuple.
2738 : *
2739 : * 2. modify the target tuple so that the join quals no longer pass and
2740 : * hence the source tuple no longer has a match.
2741 : *
2742 : * In this case, the source tuple no longer matches the target tuple,
2743 : * so we now instead find a qualifying WHEN NOT MATCHED action to
2744 : * execute.
2745 : *
2746 : * XXX Hmmm, what if the updated tuple would now match one that was
2747 : * considered NOT MATCHED so far?
2748 : *
2749 : * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED.
2750 : *
2751 : * ExecMergeMatched takes care of following the update chain and
2752 : * re-finding the qualifying WHEN MATCHED action, as long as the updated
2753 : * target tuple still satisfies the join quals, i.e., it remains a WHEN
2754 : * MATCHED case. If the tuple gets deleted or the join quals fail, it
2755 : * returns and we try ExecMergeNotMatched. Given that ExecMergeMatched
 2756              :      * always makes progress by following the update chain and we never switch
2757 : * from ExecMergeNotMatched to ExecMergeMatched, there is no risk of a
2758 : * livelock.
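                 :      *
                 :      * As a hypothetical illustration of the overall flow:
                 :      *     MERGE INTO target t USING source s ON t.id = s.id
                 :      *     WHEN MATCHED AND s.op = 'del' THEN DELETE
                 :      *     WHEN MATCHED THEN UPDATE SET val = s.val
                 :      *     WHEN NOT MATCHED THEN INSERT (id, val) VALUES (s.id, s.val);
                 :      * Rows for which a target tupleid was found take the MATCHED path
                 :      * below; source rows with no join partner take the NOT MATCHED path.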
2759 : */
2760 5120 : matched = tupleid != NULL;
2761 5120 : if (matched)
2762 3058 : matched = ExecMergeMatched(context, resultRelInfo, tupleid, canSetTag);
2763 :
2764 : /*
2765 : * Either we were dealing with a NOT MATCHED tuple or ExecMergeMatched()
2766 : * returned "false", indicating the previously MATCHED tuple no longer
2767 : * matches.
2768 : */
2769 5064 : if (!matched)
2770 2078 : ExecMergeNotMatched(context, resultRelInfo, canSetTag);
2771 :
2772 : /* No RETURNING support yet */
2773 5034 : return NULL;
2774 : }
2775 :
2776 : /*
2777 : * Check and execute the first qualifying MATCHED action. The current target
2778 : * tuple is identified by tupleid.
2779 : *
2780 : * We start from the first WHEN MATCHED action and check if the WHEN quals
2781 : * pass, if any. If the WHEN quals for the first action do not pass, we
2782 : * check the second, then the third and so on. If we reach to the end, no
2783 : * action is taken and we return true, indicating that no further action is
2784 : * required for this tuple.
2785 : *
2786 : * If we do find a qualifying action, then we attempt to execute the action.
2787 : *
2788 : * If the tuple is concurrently updated, EvalPlanQual is run with the updated
2789 : * tuple to recheck the join quals. Note that the additional quals associated
2790 : * with individual actions are evaluated by this routine via ExecQual, while
2791 : * EvalPlanQual checks for the join quals. If EvalPlanQual tells us that the
2792 : * updated tuple still passes the join quals, then we restart from the first
2793 : * action to look for a qualifying action. Otherwise, we return false --
2794 : * meaning that a NOT MATCHED action must now be executed for the current
2795 : * source tuple.
2796 : */
2797 : static bool
2798 3058 : ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2799 : ItemPointer tupleid, bool canSetTag)
2800 : {
2801 3058 : ModifyTableState *mtstate = context->mtstate;
2802 : TupleTableSlot *newslot;
2803 3058 : EState *estate = context->estate;
2804 3058 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2805 : bool isNull;
2806 3058 : EPQState *epqstate = &mtstate->mt_epqstate;
2807 : ListCell *l;
2808 :
2809 : /*
2810 : * If there are no WHEN MATCHED actions, we are done.
2811 : */
2812 3058 : if (resultRelInfo->ri_matchedMergeAction == NIL)
2813 528 : return true;
2814 :
2815 : /*
2816 : * Make tuple and any needed join variables available to ExecQual and
2817 : * ExecProject. The target's existing tuple is installed in the scantuple.
2818 : * Again, this target relation's slot is required only in the case of a
2819 : * MATCHED tuple and UPDATE/DELETE actions.
2820 : */
2821 2530 : econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
2822 2530 : econtext->ecxt_innertuple = context->planSlot;
2823 2530 : econtext->ecxt_outertuple = NULL;
2824 :
2825 2584 : lmerge_matched:
2826 :
2827 : /*
2828 : * This routine is only invoked for matched rows, and we must have found
2829 : * the tupleid of the target row in that case; fetch that tuple.
2830 : *
2831 : * We use SnapshotAny for this because we might get called again after
2832 : * EvalPlanQual returns us a new tuple, which may not be visible to our
2833 : * MVCC snapshot.
2834 : */
2835 :
2836 2584 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
2837 : tupleid,
2838 : SnapshotAny,
2839 : resultRelInfo->ri_oldTupleSlot))
2840 0 : elog(ERROR, "failed to fetch the target tuple");
2841 :
2842 3972 : foreach(l, resultRelInfo->ri_matchedMergeAction)
2843 : {
2844 3228 : MergeActionState *relaction = (MergeActionState *) lfirst(l);
2845 3228 : CmdType commandType = relaction->mas_action->commandType;
2846 : TM_Result result;
2847 3228 : UpdateContext updateCxt = {0};
2848 :
2849 : /*
2850 : * Test condition, if any.
2851 : *
2852 : * In the absence of any condition, we perform the action
2853 : * unconditionally (no need to check separately since ExecQual() will
2854 : * return true if there are no conditions to evaluate).
2855 : */
2856 3228 : if (!ExecQual(relaction->mas_whenqual, econtext))
2857 1388 : continue;
2858 :
2859 : /*
2860 : * Check if the existing target tuple meets the USING checks of
2861 : * UPDATE/DELETE RLS policies. If those checks fail, we throw an
2862 : * error.
2863 : *
2864 : * The WITH CHECK quals for UPDATE RLS policies are applied in
2865 : * ExecUpdateAct() and hence we need not do anything special to handle
2866 : * them.
2867 : *
2868 : * NOTE: We must do this after WHEN quals are evaluated, so that we
2869 : * check policies only when they matter.
2870 : */
2871 1840 : if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
2872 : {
2873 72 : ExecWithCheckOptions(commandType == CMD_UPDATE ?
2874 : WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
2875 : resultRelInfo,
2876 : resultRelInfo->ri_oldTupleSlot,
2877 72 : context->mtstate->ps.state);
2878 : }
2879 :
2880 : /* Perform stated action */
2881 1816 : switch (commandType)
2882 : {
2883 1536 : case CMD_UPDATE:
2884 :
2885 : /*
2886 : * Project the output tuple, and use that to update the table.
2887 : * We don't need to filter out junk attributes, because the
2888 : * UPDATE action's targetlist doesn't have any.
2889 : */
2890 1536 : newslot = ExecProject(relaction->mas_proj);
2891 :
2892 1536 : context->relaction = relaction;
2893 1536 : if (!ExecUpdatePrologue(context, resultRelInfo,
2894 : tupleid, NULL, newslot, &result))
2895 : {
2896 18 : if (result == TM_Ok)
2897 90 : return true; /* "do nothing" */
2898 12 : break; /* concurrent update/delete */
2899 : }
2900 1518 : result = ExecUpdateAct(context, resultRelInfo, tupleid, NULL,
2901 : newslot, false, &updateCxt);
2902 :
2903 : /*
2904 : * As in ExecUpdate(), if ExecUpdateAct() reports that a
2905 : * cross-partition update was done, then there's nothing else
2906 : * for us to do --- the UPDATE has been turned into a DELETE
2907 : * and an INSERT, and we must not perform any of the usual
2908 : * post-update tasks.
2909 : */
2910 1498 : if (updateCxt.crossPartUpdate)
2911 : {
2912 62 : mtstate->mt_merge_updated += 1;
2913 62 : if (canSetTag)
2914 62 : (estate->es_processed)++;
2915 62 : return true;
2916 : }
2917 :
2918 1436 : if (result == TM_Ok && updateCxt.updated)
2919 : {
2920 1384 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
2921 : tupleid, NULL, newslot);
2922 1384 : mtstate->mt_merge_updated += 1;
2923 : }
2924 1436 : break;
2925 :
2926 262 : case CMD_DELETE:
2927 262 : context->relaction = relaction;
2928 262 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
2929 : NULL, NULL, &result))
2930 : {
2931 12 : if (result == TM_Ok)
2932 6 : return true; /* "do nothing" */
2933 6 : break; /* concurrent update/delete */
2934 : }
2935 250 : result = ExecDeleteAct(context, resultRelInfo, tupleid, false);
2936 250 : if (result == TM_Ok)
2937 : {
2938 238 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
2939 : false);
2940 238 : mtstate->mt_merge_deleted += 1;
2941 : }
2942 250 : break;
2943 :
2944 18 : case CMD_NOTHING:
2945 : /* Doing nothing is always OK */
2946 18 : result = TM_Ok;
2947 18 : break;
2948 :
2949 0 : default:
2950 0 : elog(ERROR, "unknown action in MERGE WHEN MATCHED clause");
2951 : }
2952 :
2953 1722 : switch (result)
2954 : {
2955 1640 : case TM_Ok:
2956 : /* all good; perform final actions */
2957 1640 : if (canSetTag && commandType != CMD_NOTHING)
2958 1622 : (estate->es_processed)++;
2959 :
2960 1640 : break;
2961 :
2962 12 : case TM_SelfModified:
2963 :
2964 : /*
2965 : * The SQL standard disallows this for MERGE.
2966 : */
2967 12 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
2968 12 : ereport(ERROR,
2969 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2970 : /* translator: %s is a SQL command name */
2971 : errmsg("%s command cannot affect row a second time",
2972 : "MERGE"),
2973 : errhint("Ensure that not more than one source row matches any one target row.")));
2974 : /* This shouldn't happen */
2975 0 : elog(ERROR, "attempted to update or delete invisible tuple");
2976 : break;
2977 :
2978 8 : case TM_Deleted:
2979 8 : if (IsolationUsesXactSnapshot())
2980 0 : ereport(ERROR,
2981 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2982 : errmsg("could not serialize access due to concurrent delete")));
2983 :
2984 : /*
2985 : * If the tuple was already deleted, return to let caller
2986 : * handle it under NOT MATCHED clauses.
2987 : */
2988 8 : return false;
2989 :
2990 62 : case TM_Updated:
2991 : {
2992 : Relation resultRelationDesc;
2993 : TupleTableSlot *epqslot,
2994 : *inputslot;
2995 : LockTupleMode lockmode;
2996 :
2997 : /*
2998 : * The target tuple was concurrently updated by some other
2999 : * transaction. Run EvalPlanQual() with the new version of
3000 : * the tuple. If it does not return a tuple, then we
3001 : * switch to the NOT MATCHED list of actions. If it does
3002 : * return a tuple and the join qual is still satisfied,
3003 : * then we just need to recheck the MATCHED actions,
3004 : * starting from the top, and execute the first qualifying
3005 : * action.
3006 : */
3007 62 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
3008 62 : lockmode = ExecUpdateLockMode(estate, resultRelInfo);
3009 :
3010 62 : inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
3011 : resultRelInfo->ri_RangeTableIndex);
3012 :
3013 62 : result = table_tuple_lock(resultRelationDesc, tupleid,
3014 : estate->es_snapshot,
3015 : inputslot, estate->es_output_cid,
3016 : lockmode, LockWaitBlock,
3017 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
3018 : &context->tmfd);
3019 62 : switch (result)
3020 : {
3021 60 : case TM_Ok:
3022 60 : epqslot = EvalPlanQual(epqstate,
3023 : resultRelationDesc,
3024 : resultRelInfo->ri_RangeTableIndex,
3025 : inputslot);
3026 :
3027 : /*
3028 : * If we got no tuple, or the tuple we got has a
3029 : * NULL ctid, go back to the caller: this one is not a
3030 : * MATCHED tuple anymore, so they can retry with
3031 : * NOT MATCHED actions.
3032 : */
3033 60 : if (TupIsNull(epqslot))
3034 0 : return false;
3035 :
3036 60 : (void) ExecGetJunkAttribute(epqslot,
3037 60 : resultRelInfo->ri_RowIdAttNo,
3038 : &isNull);
3039 60 : if (isNull)
3040 6 : return false;
3041 :
3042 : /*
3043 : * When a tuple was updated and migrated to
3044 : * another partition concurrently, the current
3045 : * MERGE implementation can't follow. There's
3046 : * probably a better way to handle this case, but
3047 : * it'd require recognizing the relation to which
3048 : * the tuple moved, and setting our current
3049 : * resultRelInfo to that.
3050 : */
3051 54 : if (ItemPointerIndicatesMovedPartitions(&context->tmfd.ctid))
3052 0 : ereport(ERROR,
3053 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3054 : errmsg("tuple to be deleted was already moved to another partition due to concurrent update")));
3055 :
3056 : /*
3057 : * A non-NULL ctid means that we are still dealing
3058 : * with the MATCHED case. Restart the loop so that we
3059 : * apply all the MATCHED rules again, to ensure
3060 : * that the first qualifying WHEN MATCHED action
3061 : * is executed.
3062 : *
3063 : * Update tupleid to that of the new tuple, for
3064 : * the refetch we do at the top.
3065 : */
3066 54 : ItemPointerCopy(&context->tmfd.ctid, tupleid);
3067 54 : goto lmerge_matched;
3068 :
3069 2 : case TM_Deleted:
3070 :
3071 : /*
3072 : * tuple already deleted; tell caller to run NOT
3073 : * MATCHED actions
3074 : */
3075 2 : return false;
3076 :
3077 0 : case TM_SelfModified:
3078 :
3079 : /*
3080 : * This can be reached when following an update
3081 : * chain from a tuple updated by another session,
3082 : * reaching a tuple that was already updated in
3083 : * this transaction. If previously modified by
3084 : * this command, ignore the redundant update,
3085 : * otherwise error out.
3086 : *
3087 : * See also response to TM_SelfModified in
3088 : * ExecUpdate().
3089 : */
3090 0 : if (context->tmfd.cmax != estate->es_output_cid)
3091 0 : ereport(ERROR,
3092 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3093 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3094 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3095 0 : return false;
3096 :
3097 0 : default:
3098 : /* see table_tuple_lock call in ExecDelete() */
3099 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
3100 : result);
3101 : return false;
3102 : }
3103 : }
3104 :
3105 0 : case TM_Invisible:
3106 : case TM_WouldBlock:
3107 : case TM_BeingModified:
3108 : /* these should not occur */
3109 0 : elog(ERROR, "unexpected tuple operation result: %d", result);
3110 : break;
3111 : }
3112 :
3113 : /*
3114 : * We've activated one of the WHEN clauses, so we don't search
3115 : * further. This is required behaviour, not an optimization.
3116 : */
3117 1640 : break;
3118 : }
3119 :
3120 : /*
3121 : * Successfully executed an action or no qualifying action was found.
3122 : */
3123 2384 : return true;
3124 : }
3125 :
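/*
 * Illustrative, hypothetical sketch (standalone C, not PostgreSQL code):
 * the WHEN MATCHED handling above retries from lmerge_matched after a
 * concurrent update produces a newer tuple version.  The sketch below
 * shows the same "lock, re-check, retry or fall through" control flow
 * with invented types and functions; nothing here is part of the
 * executor's API.
 */
#include <stdbool.h>
#include <stdio.h>

typedef enum { RES_OK, RES_UPDATED, RES_DELETED } SketchLockResult;

/* Hypothetical: lock the current row version; report if a newer one exists. */
static SketchLockResult
sketch_lock_row(int *version, int latest)
{
	if (*version < latest)
	{
		*version = latest;		/* follow the update chain */
		return RES_UPDATED;
	}
	return RES_OK;
}

static bool
sketch_apply_matched(int version, int latest)
{
retry:
	switch (sketch_lock_row(&version, latest))
	{
		case RES_OK:
			printf("applied MATCHED action to version %d\n", version);
			return true;
		case RES_UPDATED:
			/* found a newer version: re-run the MATCHED checks on it */
			goto retry;
		case RES_DELETED:
			/* row is gone: caller would fall back to NOT MATCHED */
			return false;
	}
	return false;
}

int
main(void)
{
	return sketch_apply_matched(1, 3) ? 0 : 1;
}
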
3126 : /*
3127 : * Execute the first qualifying NOT MATCHED action.
3128 : */
3129 : static void
3130 2078 : ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3131 : bool canSetTag)
3132 : {
3133 2078 : ModifyTableState *mtstate = context->mtstate;
3134 2078 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3135 2078 : List *actionStates = NIL;
3136 : ListCell *l;
3137 :
3138 : /*
3139 : * For INSERT actions, the root relation's merge action is OK since the
3140 : * INSERT's targetlist and the WHEN conditions can only refer to the
3141 : * source relation and hence it does not matter which result relation we
3142 : * work with.
3143 : *
3144 : * XXX does this mean that we can avoid creating copies of actionStates on
3145 : * partitioned tables, for not-matched actions?
3146 : */
3147 2078 : actionStates = resultRelInfo->ri_notMatchedMergeAction;
3148 :
3149 : /*
3150 : * Make source tuple available to ExecQual and ExecProject. We don't need
3151 : * the target tuple, since the WHEN quals and targetlist can't refer to
3152 : * the target columns.
3153 : */
3154 2078 : econtext->ecxt_scantuple = NULL;
3155 2078 : econtext->ecxt_innertuple = context->planSlot;
3156 2078 : econtext->ecxt_outertuple = NULL;
3157 :
3158 2642 : foreach(l, actionStates)
3159 : {
3160 2078 : MergeActionState *action = (MergeActionState *) lfirst(l);
3161 2078 : CmdType commandType = action->mas_action->commandType;
3162 : TupleTableSlot *newslot;
3163 :
3164 : /*
3165 : * Test condition, if any.
3166 : *
3167 : * In the absence of any condition, we perform the action
3168 : * unconditionally (no need to check separately since ExecQual() will
3169 : * return true if there are no conditions to evaluate).
3170 : */
3171 2078 : if (!ExecQual(action->mas_whenqual, econtext))
3172 564 : continue;
3173 :
3174 : /* Perform stated action */
3175 1514 : switch (commandType)
3176 : {
3177 1514 : case CMD_INSERT:
3178 :
3179 : /*
3180 : * Project the tuple. In case of a partitioned table, the
3181 : * projection was already built to use the root's descriptor,
3182 : * so we don't need to map the tuple here.
3183 : */
3184 1514 : newslot = ExecProject(action->mas_proj);
3185 1514 : context->relaction = action;
3186 :
3187 1514 : (void) ExecInsert(context, mtstate->rootResultRelInfo, newslot,
3188 : canSetTag, NULL, NULL);
3189 1484 : mtstate->mt_merge_inserted += 1;
3190 1484 : break;
3191 0 : case CMD_NOTHING:
3192 : /* Do nothing */
3193 0 : break;
3194 0 : default:
3195 0 : elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
3196 : }
3197 :
3198 : /*
3199 : * We've activated one of the WHEN clauses, so we don't search
3200 : * further. This is required behaviour, not an optimization.
3201 : */
3202 1484 : break;
3203 : }
3204 2048 : }
3205 :
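/*
 * Illustrative, hypothetical sketch (standalone C, not PostgreSQL code):
 * ExecMergeNotMatched() evaluates the WHEN NOT MATCHED clauses in order
 * and executes only the first one whose condition passes.  The sketch
 * below shows that "first qualifying action wins" loop with invented
 * types in place of MergeActionState and ExecQual().
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct SketchWhenClause
{
	bool		(*qual) (int value);	/* NULL means "no condition" */
	void		(*action) (int value);
} SketchWhenClause;

static bool sketch_is_negative(int v) { return v < 0; }
static void sketch_do_insert(int v)   { printf("INSERT %d\n", v); }
static void sketch_do_nothing(int v)  { (void) v; }

static void
sketch_run_first_qualifying(const SketchWhenClause *clauses, int n, int value)
{
	for (int i = 0; i < n; i++)
	{
		/* an absent condition counts as true, like an empty qual */
		if (clauses[i].qual && !clauses[i].qual(value))
			continue;
		clauses[i].action(value);
		break;					/* stop at the first qualifying clause */
	}
}

int
main(void)
{
	SketchWhenClause clauses[] = {
		{sketch_is_negative, sketch_do_nothing},
		{NULL, sketch_do_insert},
	};

	sketch_run_first_qualifying(clauses, 2, 42);	/* prints "INSERT 42" */
	return 0;
}
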
3206 : /*
3207 : * Initialize state for execution of MERGE.
3208 : */
3209 : void
3210 914 : ExecInitMerge(ModifyTableState *mtstate, EState *estate)
3211 : {
3212 914 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
3213 914 : ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
3214 : ResultRelInfo *resultRelInfo;
3215 : ExprContext *econtext;
3216 : ListCell *lc;
3217 : int i;
3218 :
3219 914 : if (node->mergeActionLists == NIL)
3220 0 : return;
3221 :
3222 914 : mtstate->mt_merge_subcommands = 0;
3223 :
3224 914 : if (mtstate->ps.ps_ExprContext == NULL)
3225 914 : ExecAssignExprContext(estate, &mtstate->ps);
3226 914 : econtext = mtstate->ps.ps_ExprContext;
3227 :
3228 : /*
3229 : * Create a MergeActionState for each action on the mergeActionList and
3230 : * add it to either a list of matched actions or not-matched actions.
3231 : *
3232 : * Similar logic appears in ExecInitPartitionInfo(), so if changing
3233 : * anything here, do so there too.
3234 : */
3235 914 : i = 0;
3236 1962 : foreach(lc, node->mergeActionLists)
3237 : {
3238 1048 : List *mergeActionList = lfirst(lc);
3239 : TupleDesc relationDesc;
3240 : ListCell *l;
3241 :
3242 1048 : resultRelInfo = mtstate->resultRelInfo + i;
3243 1048 : i++;
3244 1048 : relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3245 :
3246 : /* initialize slots for MERGE fetches from this rel */
3247 1048 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3248 1048 : ExecInitMergeTupleSlots(mtstate, resultRelInfo);
3249 :
3250 2728 : foreach(l, mergeActionList)
3251 : {
3252 1680 : MergeAction *action = (MergeAction *) lfirst(l);
3253 : MergeActionState *action_state;
3254 : TupleTableSlot *tgtslot;
3255 : TupleDesc tgtdesc;
3256 : List **list;
3257 :
3258 : /*
3259 : * Build action merge state for this rel. (For partitions,
3260 : * equivalent code exists in ExecInitPartitionInfo.)
3261 : */
3262 1680 : action_state = makeNode(MergeActionState);
3263 1680 : action_state->mas_action = action;
3264 1680 : action_state->mas_whenqual = ExecInitQual((List *) action->qual,
3265 : &mtstate->ps);
3266 :
3267 : /*
3268 : * We create two lists - one for WHEN MATCHED actions and one for
3269 : * WHEN NOT MATCHED actions - and stick the MergeActionState into
3270 : * the appropriate list.
3271 : */
3272 1680 : if (action_state->mas_action->matched)
3273 1018 : list = &resultRelInfo->ri_matchedMergeAction;
3274 : else
3275 662 : list = &resultRelInfo->ri_notMatchedMergeAction;
3276 1680 : *list = lappend(*list, action_state);
3277 :
3278 1680 : switch (action->commandType)
3279 : {
3280 654 : case CMD_INSERT:
3281 654 : ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
3282 : action->targetList);
3283 :
3284 : /*
3285 : * If the MERGE targets a partitioned table, any INSERT
3286 : * actions must be routed through it, not the child
3287 : * relations. Initialize the routing struct and the root
3288 : * table's "new" tuple slot for that, if not already done.
3289 : * The projection we prepare, for all relations, uses the
3290 : * root relation descriptor, and targets the plan's root
3291 : * slot. (This is consistent with the fact that we
3292 : * checked the plan output to match the root relation,
3293 : * above.)
3294 : */
3295 654 : if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
3296 : RELKIND_PARTITIONED_TABLE)
3297 : {
3298 186 : if (mtstate->mt_partition_tuple_routing == NULL)
3299 : {
3300 : /*
3301 : * Initialize planstate for routing if not already
3302 : * done.
3303 : *
3304 : * Note that the slot is managed as a standalone
3305 : * slot belonging to ModifyTableState, so we pass
3306 : * NULL for the 2nd argument.
3307 : */
3308 90 : mtstate->mt_root_tuple_slot =
3309 90 : table_slot_create(rootRelInfo->ri_RelationDesc,
3310 : NULL);
3311 90 : mtstate->mt_partition_tuple_routing =
3312 90 : ExecSetupPartitionTupleRouting(estate,
3313 : rootRelInfo->ri_RelationDesc);
3314 : }
3315 186 : tgtslot = mtstate->mt_root_tuple_slot;
3316 186 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
3317 : }
3318 : else
3319 : {
3320 : /* not partitioned? use the stock relation and slot */
3321 468 : tgtslot = resultRelInfo->ri_newTupleSlot;
3322 468 : tgtdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3323 : }
3324 :
3325 654 : action_state->mas_proj =
3326 654 : ExecBuildProjectionInfo(action->targetList, econtext,
3327 : tgtslot,
3328 : &mtstate->ps,
3329 : tgtdesc);
3330 :
3331 654 : mtstate->mt_merge_subcommands |= MERGE_INSERT;
3332 654 : break;
3333 786 : case CMD_UPDATE:
3334 786 : action_state->mas_proj =
3335 786 : ExecBuildUpdateProjection(action->targetList,
3336 : true,
3337 : action->updateColnos,
3338 : relationDesc,
3339 : econtext,
3340 : resultRelInfo->ri_newTupleSlot,
3341 : &mtstate->ps);
3342 786 : mtstate->mt_merge_subcommands |= MERGE_UPDATE;
3343 786 : break;
3344 212 : case CMD_DELETE:
3345 212 : mtstate->mt_merge_subcommands |= MERGE_DELETE;
3346 212 : break;
3347 28 : case CMD_NOTHING:
3348 28 : break;
3349 0 : default:
3350 0 : elog(ERROR, "unknown operation");
3351 : break;
3352 : }
3353 : }
3354 : }
3355 : }
3356 :
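/*
 * Illustrative, hypothetical sketch (standalone C, not PostgreSQL code):
 * ExecInitMerge() records which action types appear in the MERGE by
 * OR-ing MERGE_INSERT / MERGE_UPDATE / MERGE_DELETE into
 * mt_merge_subcommands, and fireBSTriggers()/fireASTriggers() later test
 * those bits to decide which statement-level triggers to fire.  The flag
 * values below are invented for the sketch.
 */
#include <stdio.h>

#define SKETCH_MERGE_INSERT 0x01	/* hypothetical values */
#define SKETCH_MERGE_UPDATE 0x02
#define SKETCH_MERGE_DELETE 0x04

int
main(void)
{
	int			subcommands = 0;

	subcommands |= SKETCH_MERGE_INSERT; /* saw an INSERT action */
	subcommands |= SKETCH_MERGE_UPDATE; /* saw an UPDATE action */

	if (subcommands & SKETCH_MERGE_INSERT)
		printf("fire INSERT statement triggers\n");
	if (subcommands & SKETCH_MERGE_UPDATE)
		printf("fire UPDATE statement triggers\n");
	if (subcommands & SKETCH_MERGE_DELETE)
		printf("fire DELETE statement triggers\n");
	return 0;
}
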
3357 : /*
3358 : * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
3359 : *
3360 : * We mark 'projectNewInfoValid' even though the projections themselves
3361 : * are not initialized here.
3362 : */
3363 : void
3364 1092 : ExecInitMergeTupleSlots(ModifyTableState *mtstate,
3365 : ResultRelInfo *resultRelInfo)
3366 : {
3367 1092 : EState *estate = mtstate->ps.state;
3368 :
3369 : Assert(!resultRelInfo->ri_projectNewInfoValid);
3370 :
3371 1092 : resultRelInfo->ri_oldTupleSlot =
3372 1092 : table_slot_create(resultRelInfo->ri_RelationDesc,
3373 : &estate->es_tupleTable);
3374 1092 : resultRelInfo->ri_newTupleSlot =
3375 1092 : table_slot_create(resultRelInfo->ri_RelationDesc,
3376 : &estate->es_tupleTable);
3377 1092 : resultRelInfo->ri_projectNewInfoValid = true;
3378 1092 : }
3379 :
3380 : /*
3381 : * Process BEFORE EACH STATEMENT triggers
3382 : */
3383 : static void
3384 101922 : fireBSTriggers(ModifyTableState *node)
3385 : {
3386 101922 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
3387 101922 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3388 :
3389 101922 : switch (node->operation)
3390 : {
3391 76900 : case CMD_INSERT:
3392 76900 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3393 76888 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3394 828 : ExecBSUpdateTriggers(node->ps.state,
3395 : resultRelInfo);
3396 76888 : break;
3397 12390 : case CMD_UPDATE:
3398 12390 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3399 12390 : break;
3400 11778 : case CMD_DELETE:
3401 11778 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3402 11778 : break;
3403 854 : case CMD_MERGE:
3404 854 : if (node->mt_merge_subcommands & MERGE_INSERT)
3405 498 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3406 854 : if (node->mt_merge_subcommands & MERGE_UPDATE)
3407 574 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3408 854 : if (node->mt_merge_subcommands & MERGE_DELETE)
3409 176 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3410 854 : break;
3411 0 : default:
3412 0 : elog(ERROR, "unknown operation");
3413 : break;
3414 : }
3415 101910 : }
3416 :
3417 : /*
3418 : * Process AFTER EACH STATEMENT triggers
3419 : */
3420 : static void
3421 99126 : fireASTriggers(ModifyTableState *node)
3422 : {
3423 99126 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
3424 99126 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3425 :
3426 99126 : switch (node->operation)
3427 : {
3428 74894 : case CMD_INSERT:
3429 74894 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3430 726 : ExecASUpdateTriggers(node->ps.state,
3431 : resultRelInfo,
3432 726 : node->mt_oc_transition_capture);
3433 74894 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
3434 74894 : node->mt_transition_capture);
3435 74894 : break;
3436 11810 : case CMD_UPDATE:
3437 11810 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3438 11810 : node->mt_transition_capture);
3439 11810 : break;
3440 11654 : case CMD_DELETE:
3441 11654 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
3442 11654 : node->mt_transition_capture);
3443 11654 : break;
3444 768 : case CMD_MERGE:
3445 768 : if (node->mt_merge_subcommands & MERGE_DELETE)
3446 146 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
3447 146 : node->mt_transition_capture);
3448 768 : if (node->mt_merge_subcommands & MERGE_UPDATE)
3449 518 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3450 518 : node->mt_transition_capture);
3451 768 : if (node->mt_merge_subcommands & MERGE_INSERT)
3452 466 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
3453 466 : node->mt_transition_capture);
3454 768 : break;
3455 0 : default:
3456 0 : elog(ERROR, "unknown operation");
3457 : break;
3458 : }
3459 99126 : }
3460 :
3461 : /*
3462 : * Set up the state needed for collecting transition tuples for AFTER
3463 : * triggers.
3464 : */
3465 : static void
3466 102214 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
3467 : {
3468 102214 : ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
3469 102214 : ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
3470 :
3471 : /* Check for transition tables on the directly targeted relation. */
3472 102214 : mtstate->mt_transition_capture =
3473 102214 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3474 102214 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3475 : mtstate->operation);
3476 102214 : if (plan->operation == CMD_INSERT &&
3477 76902 : plan->onConflictAction == ONCONFLICT_UPDATE)
3478 828 : mtstate->mt_oc_transition_capture =
3479 828 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3480 828 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3481 : CMD_UPDATE);
3482 102214 : }
3483 :
3484 : /*
3485 : * ExecPrepareTupleRouting --- prepare for routing one tuple
3486 : *
3487 : * Determine the partition in which the tuple in slot is to be inserted,
3488 : * and return its ResultRelInfo in *partRelInfo. The return value is
3489 : * a slot holding the tuple of the partition rowtype.
3490 : *
3491 : * This also sets the transition table information in mtstate based on the
3492 : * selected partition.
3493 : */
3494 : static TupleTableSlot *
3495 721090 : ExecPrepareTupleRouting(ModifyTableState *mtstate,
3496 : EState *estate,
3497 : PartitionTupleRouting *proute,
3498 : ResultRelInfo *targetRelInfo,
3499 : TupleTableSlot *slot,
3500 : ResultRelInfo **partRelInfo)
3501 : {
3502 : ResultRelInfo *partrel;
3503 : TupleConversionMap *map;
3504 :
3505 : /*
3506 : * Look up the target partition's ResultRelInfo. If ExecFindPartition does
3507 : * not find a valid partition for the tuple in 'slot' then an error is
3508 : * raised. An error may also be raised if the found partition is not a
3509 : * valid target for INSERTs. This is required since a partitioned table
3510 : * UPDATE to another partition becomes a DELETE+INSERT.
3511 : */
3512 721090 : partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
3513 :
3514 : /*
3515 : * If we're capturing transition tuples, we might need to convert from the
3516 : * partition rowtype to the root partitioned table's rowtype. But if there
3517 : * are no BEFORE triggers on the partition that could change the tuple, we
3518 : * can just remember the original unconverted tuple to avoid a needless
3519 : * round trip conversion.
3520 : */
3521 720892 : if (mtstate->mt_transition_capture != NULL)
3522 : {
3523 : bool has_before_insert_row_trig;
3524 :
3525 168 : has_before_insert_row_trig = (partrel->ri_TrigDesc &&
3526 42 : partrel->ri_TrigDesc->trig_insert_before_row);
3527 :
3528 126 : mtstate->mt_transition_capture->tcs_original_insert_tuple =
3529 126 : !has_before_insert_row_trig ? slot : NULL;
3530 : }
3531 :
3532 : /*
3533 : * Convert the tuple, if necessary.
3534 : */
3535 720892 : map = ExecGetRootToChildMap(partrel, estate);
3536 720892 : if (map != NULL)
3537 : {
3538 68310 : TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
3539 :
3540 68310 : slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
3541 : }
3542 :
3543 720892 : *partRelInfo = partrel;
3544 720892 : return slot;
3545 : }
3546 :
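/*
 * Illustrative, hypothetical sketch (standalone C, not PostgreSQL code):
 * ExecPrepareTupleRouting() may convert a tuple from the root table's
 * column order to the chosen partition's column order by applying an
 * attribute map.  The sketch below shows the idea with plain int arrays
 * standing in for tuple slots; the map contents are invented.
 */
#include <stdio.h>

/* attrmap[i] = 1-based source column that feeds destination column i */
static void
sketch_convert_row(const int *src, const int *attrmap, int ncols, int *dst)
{
	for (int i = 0; i < ncols; i++)
		dst[i] = src[attrmap[i] - 1];
}

int
main(void)
{
	int			root_row[3] = {10, 20, 30};
	int			attrmap[3] = {3, 1, 2}; /* partition declares columns in another order */
	int			part_row[3];

	sketch_convert_row(root_row, attrmap, 3, part_row);
	printf("%d %d %d\n", part_row[0], part_row[1], part_row[2]);	/* 30 10 20 */
	return 0;
}
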
3547 : /* ----------------------------------------------------------------
3548 : * ExecModifyTable
3549 : *
3550 : * Perform table modifications as required, and return RETURNING results
3551 : * if needed.
3552 : * ----------------------------------------------------------------
3553 : */
3554 : static TupleTableSlot *
3555 109698 : ExecModifyTable(PlanState *pstate)
3556 : {
3557 109698 : ModifyTableState *node = castNode(ModifyTableState, pstate);
3558 : ModifyTableContext context;
3559 109698 : EState *estate = node->ps.state;
3560 109698 : CmdType operation = node->operation;
3561 : ResultRelInfo *resultRelInfo;
3562 : PlanState *subplanstate;
3563 : TupleTableSlot *slot;
3564 : TupleTableSlot *oldSlot;
3565 : ItemPointerData tuple_ctid;
3566 : HeapTupleData oldtupdata;
3567 : HeapTuple oldtuple;
3568 : ItemPointer tupleid;
3569 :
3570 109698 : CHECK_FOR_INTERRUPTS();
3571 :
3572 : /*
3573 : * This should NOT get called during EvalPlanQual; we should have passed a
3574 : * subplan tree to EvalPlanQual, instead. Use a runtime test not just
3575 : * Assert because this condition is easy to miss in testing. (Note:
3576 : * although ModifyTable should not get executed within an EvalPlanQual
3577 : * operation, we do have to allow it to be initialized and shut down in
3578 : * case it is within a CTE subplan. Hence this test must be here, not in
3579 : * ExecInitModifyTable.)
3580 : */
3581 109698 : if (estate->es_epq_active != NULL)
3582 0 : elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
3583 :
3584 : /*
3585 : * If we've already completed processing, don't try to do more. We need
3586 : * this test because ExecPostprocessPlan might call us an extra time, and
3587 : * our subplan's nodes aren't necessarily robust against being called
3588 : * extra times.
3589 : */
3590 109698 : if (node->mt_done)
3591 770 : return NULL;
3592 :
3593 : /*
3594 : * On first call, fire BEFORE STATEMENT triggers before proceeding.
3595 : */
3596 108928 : if (node->fireBSTriggers)
3597 : {
3598 101922 : fireBSTriggers(node);
3599 101910 : node->fireBSTriggers = false;
3600 : }
3601 :
3602 : /* Preload local variables */
3603 108916 : resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
3604 108916 : subplanstate = outerPlanState(node);
3605 :
3606 : /* Set global context */
3607 108916 : context.mtstate = node;
3608 108916 : context.epqstate = &node->mt_epqstate;
3609 108916 : context.estate = estate;
3610 :
3611 : /*
3612 : * Fetch rows from subplan, and execute the required table modification
3613 : * for each row.
3614 : */
3615 : for (;;)
3616 : {
3617 : /*
3618 : * Reset the per-output-tuple exprcontext. This is needed because
3619 : * triggers expect to use that context as workspace. It's a bit ugly
3620 : * to do this below the top level of the plan, however. We might need
3621 : * to rethink this later.
3622 : */
3623 13075598 : ResetPerTupleExprContext(estate);
3624 :
3625 : /*
3626 : * Reset per-tuple memory context used for processing on conflict and
3627 : * returning clauses, to free any expression evaluation storage
3628 : * allocated in the previous cycle.
3629 : */
3630 13075598 : if (pstate->ps_ExprContext)
3631 326636 : ResetExprContext(pstate->ps_ExprContext);
3632 :
3633 13075598 : context.planSlot = ExecProcNode(subplanstate);
3634 :
3635 : /* No more tuples to process? */
3636 13075252 : if (TupIsNull(context.planSlot))
3637 : break;
3638 :
3639 : /*
3640 : * When there are multiple result relations, each tuple contains a
3641 : * junk column that gives the OID of the rel from which it came.
3642 : * Extract it and select the correct result relation.
3643 : */
3644 12976126 : if (AttributeNumberIsValid(node->mt_resultOidAttno))
3645 : {
3646 : Datum datum;
3647 : bool isNull;
3648 : Oid resultoid;
3649 :
3650 4348 : datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
3651 : &isNull);
3652 4348 : if (isNull)
3653 : {
3654 : /*
3655 : * For commands other than MERGE, any tuples having InvalidOid
3656 : * for tableoid are errors. For MERGE, we may need to handle
3657 : * them as WHEN NOT MATCHED clauses if any, so do that.
3658 : *
3659 : * Note that we use the node's toplevel resultRelInfo, not any
3660 : * specific partition's.
3661 : */
3662 370 : if (operation == CMD_MERGE)
3663 : {
3664 370 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
3665 :
3666 370 : ExecMerge(&context, node->resultRelInfo, NULL, node->canSetTag);
3667 370 : continue; /* no RETURNING support yet */
3668 : }
3669 :
3670 0 : elog(ERROR, "tableoid is NULL");
3671 : }
3672 3978 : resultoid = DatumGetObjectId(datum);
3673 :
3674 : /* If it's not the same as last time, we need to locate the rel */
3675 3978 : if (resultoid != node->mt_lastResultOid)
3676 2690 : resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
3677 : false, true);
3678 : }
3679 :
3680 : /*
3681 : * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
3682 : * here is compute the RETURNING expressions.
3683 : */
3684 12975756 : if (resultRelInfo->ri_usesFdwDirectModify)
3685 : {
3686 : Assert(resultRelInfo->ri_projectReturning);
3687 :
3688 : /*
3689 : * A scan slot containing the data that was actually inserted,
3690 : * updated or deleted has already been made available to
3691 : * ExecProcessReturning by IterateDirectModify, so no need to
3692 : * provide it here.
3693 : */
3694 694 : slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot);
3695 :
3696 694 : return slot;
3697 : }
3698 :
3699 12975062 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
3700 12975062 : slot = context.planSlot;
3701 :
3702 12975062 : tupleid = NULL;
3703 12975062 : oldtuple = NULL;
3704 :
3705 : /*
3706 : * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
3707 : * to be updated/deleted/merged. For a heap relation, that's a TID;
3708 : * otherwise we may have a wholerow junk attr that carries the old
3709 : * tuple in toto. Keep this in step with the part of
3710 : * ExecInitModifyTable that sets up ri_RowIdAttNo.
3711 : */
3712 12975062 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
3713 : operation == CMD_MERGE)
3714 : {
3715 : char relkind;
3716 : Datum datum;
3717 : bool isNull;
3718 :
3719 1832240 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
3720 1832240 : if (relkind == RELKIND_RELATION ||
3721 354 : relkind == RELKIND_MATVIEW ||
3722 : relkind == RELKIND_PARTITIONED_TABLE)
3723 : {
3724 : /* ri_RowIdAttNo refers to a ctid attribute */
3725 : Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo));
3726 1831892 : datum = ExecGetJunkAttribute(slot,
3727 1831892 : resultRelInfo->ri_RowIdAttNo,
3728 : &isNull);
3729 :
3730 : /*
3731 : * For commands other than MERGE, any tuples having a null row
3732 : * identifier are errors. For MERGE, we may need to handle
3733 : * them as WHEN NOT MATCHED clauses if any, so do that.
3734 : *
3735 : * Note that we use the node's toplevel resultRelInfo, not any
3736 : * specific partition's.
3737 : */
3738 1831892 : if (isNull)
3739 : {
3740 1692 : if (operation == CMD_MERGE)
3741 : {
3742 1692 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
3743 :
3744 1692 : ExecMerge(&context, node->resultRelInfo, NULL, node->canSetTag);
3745 1662 : continue; /* no RETURNING support yet */
3746 : }
3747 :
3748 0 : elog(ERROR, "ctid is NULL");
3749 : }
3750 :
3751 1830200 : tupleid = (ItemPointer) DatumGetPointer(datum);
3752 1830200 : tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
3753 1830200 : tupleid = &tuple_ctid;
3754 : }
3755 :
3756 : /*
3757 : * Use the wholerow attribute, when available, to reconstruct the
3758 : * old relation tuple. The old tuple serves one or both of two
3759 : * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
3760 : * provides values for any unchanged columns for the NEW tuple of
3761 : * an UPDATE, because the subplan does not produce all the columns
3762 : * of the target table.
3763 : *
3764 : * Note that the wholerow attribute does not carry system columns,
3765 : * so foreign table triggers miss seeing those, except that we
3766 : * know enough here to set t_tableOid. Quite separately from
3767 : * this, the FDW may fetch its own junk attrs to identify the row.
3768 : *
3769 : * Other relevant relkinds, currently limited to views, always
3770 : * have a wholerow attribute.
3771 : */
3772 348 : else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
3773 : {
3774 330 : datum = ExecGetJunkAttribute(slot,
3775 330 : resultRelInfo->ri_RowIdAttNo,
3776 : &isNull);
3777 : /* shouldn't ever get a null result... */
3778 330 : if (isNull)
3779 0 : elog(ERROR, "wholerow is NULL");
3780 :
3781 330 : oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
3782 330 : oldtupdata.t_len =
3783 330 : HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
3784 330 : ItemPointerSetInvalid(&(oldtupdata.t_self));
3785 : /* Historically, view triggers see invalid t_tableOid. */
3786 330 : oldtupdata.t_tableOid =
3787 330 : (relkind == RELKIND_VIEW) ? InvalidOid :
3788 162 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
3789 :
3790 330 : oldtuple = &oldtupdata;
3791 : }
3792 : else
3793 : {
3794 : /* Only foreign tables are allowed to omit a row-ID attr */
3795 : Assert(relkind == RELKIND_FOREIGN_TABLE);
3796 : }
3797 : }
3798 :
3799 12973370 : switch (operation)
3800 : {
3801 11142822 : case CMD_INSERT:
3802 : /* Initialize projection info if first time for this table */
3803 11142822 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3804 75962 : ExecInitInsertProjection(node, resultRelInfo);
3805 11142822 : slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
3806 11142822 : slot = ExecInsert(&context, resultRelInfo, slot,
3807 11142822 : node->canSetTag, NULL, NULL);
3808 11140960 : break;
3809 :
3810 302956 : case CMD_UPDATE:
3811 : /* Initialize projection info if first time for this table */
3812 302956 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3813 12142 : ExecInitUpdateProjection(node, resultRelInfo);
3814 :
3815 : /*
3816 : * Make the new tuple by combining plan's output tuple with
3817 : * the old tuple being updated.
3818 : */
3819 302956 : oldSlot = resultRelInfo->ri_oldTupleSlot;
3820 302956 : if (oldtuple != NULL)
3821 : {
3822 : /* Use the wholerow junk attr as the old tuple. */
3823 258 : ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
3824 : }
3825 : else
3826 : {
3827 : /* Fetch the most recent version of old tuple. */
3828 302698 : Relation relation = resultRelInfo->ri_RelationDesc;
3829 :
3830 302698 : if (!table_tuple_fetch_row_version(relation, tupleid,
3831 : SnapshotAny,
3832 : oldSlot))
3833 0 : elog(ERROR, "failed to fetch tuple being updated");
3834 : }
3835 302956 : slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
3836 : oldSlot);
3837 302956 : context.relaction = NULL;
3838 :
3839 : /* Now apply the update. */
3840 302956 : slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
3841 302956 : slot, node->canSetTag);
3842 302560 : break;
3843 :
3844 1524534 : case CMD_DELETE:
3845 1524534 : slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
3846 1524534 : true, false, node->canSetTag, NULL, NULL);
3847 1524452 : break;
3848 :
3849 3058 : case CMD_MERGE:
3850 3058 : slot = ExecMerge(&context, resultRelInfo, tupleid, node->canSetTag);
3851 3002 : break;
3852 :
3853 0 : default:
3854 0 : elog(ERROR, "unknown operation");
3855 : break;
3856 : }
3857 :
3858 : /*
3859 : * If we got a RETURNING result, return it to caller. We'll continue
3860 : * the work on next call.
3861 : */
3862 12970974 : if (slot)
3863 6324 : return slot;
3864 : }
3865 :
3866 : /*
3867 : * Insert remaining tuples for batch insert.
3868 : */
3869 99126 : if (estate->es_insert_pending_result_relations != NIL)
3870 24 : ExecPendingInserts(estate);
3871 :
3872 : /*
3873 : * We're done, but fire AFTER STATEMENT triggers before exiting.
3874 : */
3875 99126 : fireASTriggers(node);
3876 :
3877 99126 : node->mt_done = true;
3878 :
3879 99126 : return NULL;
3880 : }
3881 :
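/*
 * Illustrative, hypothetical sketch (standalone C, not PostgreSQL code):
 * ExecModifyTable() hands a slot back to its caller whenever a RETURNING
 * projection produced one, and otherwise keeps looping until the subplan
 * is exhausted.  The sketch below shows that "return one result per call,
 * or drain everything in one call" shape with invented types.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct SketchModifyNode
{
	int			next;
	int			nrows;
	bool		has_returning;
	bool		done;
} SketchModifyNode;

/* Returns a row number while RETURNING is active, -1 once finished. */
static int
sketch_exec(SketchModifyNode *node)
{
	if (node->done)
		return -1;

	while (node->next < node->nrows)
	{
		int			row = node->next++;

		/* ... the actual modification for 'row' would happen here ... */
		if (node->has_returning)
			return row;			/* hand one result back per call */
	}
	node->done = true;
	return -1;
}

int
main(void)
{
	SketchModifyNode node = {0, 3, true, false};
	int			row;

	while ((row = sketch_exec(&node)) >= 0)
		printf("RETURNING row %d\n", row);
	return 0;
}
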
3882 : /*
3883 : * ExecLookupResultRelByOid
3884 : * If the table with given OID is among the result relations to be
3885 : * updated by the given ModifyTable node, return its ResultRelInfo.
3886 : *
3887 : * If not found, return NULL if missing_ok, else raise error.
3888 : *
3889 : * If update_cache is true, then upon successful lookup, update the node's
3890 : * one-element cache. ONLY ExecModifyTable may pass true for this.
3891 : */
3892 : ResultRelInfo *
3893 10694 : ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid,
3894 : bool missing_ok, bool update_cache)
3895 : {
3896 10694 : if (node->mt_resultOidHash)
3897 : {
3898 : /* Use the pre-built hash table to locate the rel */
3899 : MTTargetRelLookup *mtlookup;
3900 :
3901 : mtlookup = (MTTargetRelLookup *)
3902 0 : hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
3903 0 : if (mtlookup)
3904 : {
3905 0 : if (update_cache)
3906 : {
3907 0 : node->mt_lastResultOid = resultoid;
3908 0 : node->mt_lastResultIndex = mtlookup->relationIndex;
3909 : }
3910 0 : return node->resultRelInfo + mtlookup->relationIndex;
3911 : }
3912 : }
3913 : else
3914 : {
3915 : /* With few target rels, just search the ResultRelInfo array */
3916 20442 : for (int ndx = 0; ndx < node->mt_nrels; ndx++)
3917 : {
3918 12830 : ResultRelInfo *rInfo = node->resultRelInfo + ndx;
3919 :
3920 12830 : if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
3921 : {
3922 3082 : if (update_cache)
3923 : {
3924 2690 : node->mt_lastResultOid = resultoid;
3925 2690 : node->mt_lastResultIndex = ndx;
3926 : }
3927 3082 : return rInfo;
3928 : }
3929 : }
3930 : }
3931 :
3932 7612 : if (!missing_ok)
3933 0 : elog(ERROR, "incorrect result relation OID %u", resultoid);
3934 7612 : return NULL;
3935 : }
3936 :
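/*
 * Illustrative, hypothetical sketch (standalone C, not PostgreSQL code):
 * ExecLookupResultRelByOid() pairs a linear scan (or hash table) with the
 * one-element cache mt_lastResultOid/mt_lastResultIndex that
 * ExecModifyTable consults before calling it.  The sketch below shows the
 * cached linear lookup with invented types; 0 plays the role of an
 * invalid OID.
 */
#include <stdio.h>

typedef struct SketchRelLookup
{
	unsigned	oids[8];		/* stand-in for result relation OIDs */
	int			nrels;
	unsigned	last_oid;		/* one-element cache; 0 = not yet valid */
	int			last_index;
} SketchRelLookup;

static int
sketch_lookup_rel(SketchRelLookup *lk, unsigned oid)
{
	if (oid != 0 && oid == lk->last_oid)
		return lk->last_index;	/* cache hit, no scan needed */

	for (int i = 0; i < lk->nrels; i++)
	{
		if (lk->oids[i] == oid)
		{
			lk->last_oid = oid; /* refresh the cache */
			lk->last_index = i;
			return i;
		}
	}
	return -1;					/* caller decides: error out or return NULL */
}

int
main(void)
{
	SketchRelLookup lk = {{100, 200, 300}, 3, 0, 0};

	printf("%d\n", sketch_lookup_rel(&lk, 300));	/* 2, via the scan */
	printf("%d\n", sketch_lookup_rel(&lk, 300));	/* 2, via the cache */
	return 0;
}
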
3937 : /* ----------------------------------------------------------------
3938 : * ExecInitModifyTable
3939 : * ----------------------------------------------------------------
3940 : */
3941 : ModifyTableState *
3942 102984 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
3943 : {
3944 : ModifyTableState *mtstate;
3945 102984 : Plan *subplan = outerPlan(node);
3946 102984 : CmdType operation = node->operation;
3947 102984 : int nrels = list_length(node->resultRelations);
3948 : ResultRelInfo *resultRelInfo;
3949 : List *arowmarks;
3950 : ListCell *l;
3951 : int i;
3952 : Relation rel;
3953 :
3954 : /* check for unsupported flags */
3955 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
3956 :
3957 : /*
3958 : * create state structure
3959 : */
3960 102984 : mtstate = makeNode(ModifyTableState);
3961 102984 : mtstate->ps.plan = (Plan *) node;
3962 102984 : mtstate->ps.state = estate;
3963 102984 : mtstate->ps.ExecProcNode = ExecModifyTable;
3964 :
3965 102984 : mtstate->operation = operation;
3966 102984 : mtstate->canSetTag = node->canSetTag;
3967 102984 : mtstate->mt_done = false;
3968 :
3969 102984 : mtstate->mt_nrels = nrels;
3970 102984 : mtstate->resultRelInfo = (ResultRelInfo *)
3971 102984 : palloc(nrels * sizeof(ResultRelInfo));
3972 :
3973 102984 : mtstate->mt_merge_inserted = 0;
3974 102984 : mtstate->mt_merge_updated = 0;
3975 102984 : mtstate->mt_merge_deleted = 0;
3976 :
3977 : /*----------
3978 : * Resolve the target relation. This is the same as:
3979 : *
3980 : * - the relation for which we will fire FOR STATEMENT triggers,
3981 : * - the relation into whose tuple format all captured transition tuples
3982 : * must be converted, and
3983 : * - the root partitioned table used for tuple routing.
3984 : *
3985 : * If it's a partitioned or inherited table, the root partition or
3986 : * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
3987 : * given explicitly in node->rootRelation. Otherwise, the target relation
3988 : * is the sole relation in the node->resultRelations list.
3989 : *----------
3990 : */
3991 102984 : if (node->rootRelation > 0)
3992 : {
3993 2350 : mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
3994 2350 : ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
3995 : node->rootRelation);
3996 : }
3997 : else
3998 : {
3999 : Assert(list_length(node->resultRelations) == 1);
4000 100634 : mtstate->rootResultRelInfo = mtstate->resultRelInfo;
4001 100634 : ExecInitResultRelation(estate, mtstate->resultRelInfo,
4002 100634 : linitial_int(node->resultRelations));
4003 : }
4004 :
4005 : /* set up epqstate with dummy subplan data for the moment */
4006 102984 : EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
4007 : node->epqParam, node->resultRelations);
4008 102984 : mtstate->fireBSTriggers = true;
4009 :
4010 : /*
4011 : * Build state for collecting transition tuples. This requires having a
4012 : * valid trigger query context, so skip it in explain-only mode.
4013 : */
4014 102984 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
4015 102214 : ExecSetupTransitionCaptureState(mtstate, estate);
4016 :
4017 : /*
4018 : * Open all the result relations and initialize the ResultRelInfo structs.
4019 : * (But root relation was initialized above, if it's part of the array.)
4020 : * We must do this before initializing the subplan, because direct-modify
4021 : * FDWs expect their ResultRelInfos to be available.
4022 : */
4023 102984 : resultRelInfo = mtstate->resultRelInfo;
4024 102984 : i = 0;
4025 207904 : foreach(l, node->resultRelations)
4026 : {
4027 105188 : Index resultRelation = lfirst_int(l);
4028 :
4029 105188 : if (resultRelInfo != mtstate->rootResultRelInfo)
4030 : {
4031 4554 : ExecInitResultRelation(estate, resultRelInfo, resultRelation);
4032 :
4033 : /*
4034 : * For child result relations, store the root result relation
4035 : * pointer. We do so for the convenience of places that want to
4036 : * look at the query's original target relation but don't have the
4037 : * mtstate handy.
4038 : */
4039 4554 : resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
4040 : }
4041 :
4042 : /* Initialize the usesFdwDirectModify flag */
4043 105188 : resultRelInfo->ri_usesFdwDirectModify =
4044 105188 : bms_is_member(i, node->fdwDirectModifyPlans);
4045 :
4046 : /*
4047 : * Verify result relation is a valid target for the current operation
4048 : */
4049 105188 : CheckValidResultRel(resultRelInfo, operation);
4050 :
4051 104920 : resultRelInfo++;
4052 104920 : i++;
4053 : }
4054 :
4055 : /*
4056 : * Now we may initialize the subplan.
4057 : */
4058 102716 : outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
4059 :
4060 : /*
4061 : * Do additional per-result-relation initialization.
4062 : */
4063 207602 : for (i = 0; i < nrels; i++)
4064 : {
4065 104886 : resultRelInfo = &mtstate->resultRelInfo[i];
4066 :
4067 : /* Let FDWs init themselves for foreign-table result rels */
4068 104886 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4069 104678 : resultRelInfo->ri_FdwRoutine != NULL &&
4070 306 : resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
4071 : {
4072 306 : List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
4073 :
4074 306 : resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
4075 : resultRelInfo,
4076 : fdw_private,
4077 : i,
4078 : eflags);
4079 : }
4080 :
4081 : /*
4082 : * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
4083 : * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
4084 : * tables, the FDW might have created additional junk attr(s), but
4085 : * those are no concern of ours.
4086 : */
4087 104886 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4088 : operation == CMD_MERGE)
4089 : {
4090 : char relkind;
4091 :
4092 27766 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4093 27766 : if (relkind == RELKIND_RELATION ||
4094 568 : relkind == RELKIND_MATVIEW ||
4095 : relkind == RELKIND_PARTITIONED_TABLE)
4096 : {
4097 27234 : resultRelInfo->ri_RowIdAttNo =
4098 27234 : ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
4099 27234 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4100 0 : elog(ERROR, "could not find junk ctid column");
4101 : }
4102 532 : else if (relkind == RELKIND_FOREIGN_TABLE)
4103 : {
4104 : /*
4105 : * We don't support MERGE with foreign tables for now. (It's
4106 : * problematic because the implementation uses CTID.)
4107 : */
4108 : Assert(operation != CMD_MERGE);
4109 :
4110 : /*
4111 : * When there is a row-level trigger, there should be a
4112 : * wholerow attribute. We also require it to be present in
4113 : * UPDATE and MERGE, so we can get the values of unchanged
4114 : * columns.
4115 : */
4116 340 : resultRelInfo->ri_RowIdAttNo =
4117 340 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4118 : "wholerow");
4119 340 : if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
4120 190 : !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4121 0 : elog(ERROR, "could not find junk wholerow column");
4122 : }
4123 : else
4124 : {
4125 : /* No support for MERGE */
4126 : Assert(operation != CMD_MERGE);
4127 : /* Other valid target relkinds must provide wholerow */
4128 192 : resultRelInfo->ri_RowIdAttNo =
4129 192 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4130 : "wholerow");
4131 192 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4132 0 : elog(ERROR, "could not find junk wholerow column");
4133 : }
4134 : }
4135 : }
4136 :
4137 : /*
4138 : * If this is an inherited update/delete/merge, there will be a junk
4139 : * attribute named "tableoid" present in the subplan's targetlist. It
4140 : * will be used to identify the result relation for a given tuple to be
4141 : * updated/deleted/merged.
4142 : */
4143 102716 : mtstate->mt_resultOidAttno =
4144 102716 : ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
4145 : Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || nrels == 1);
4146 102716 : mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
4147 102716 : mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
4148 :
4149 : /* Get the root target relation */
4150 102716 : rel = mtstate->rootResultRelInfo->ri_RelationDesc;
4151 :
4152 : /*
4153 : * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
4154 : * or MERGE might need this too, but only if it actually moves tuples
4155 : * between partitions; in that case setup is done by
4156 : * ExecCrossPartitionUpdate.
4157 : */
4158 102716 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
4159 : operation == CMD_INSERT)
4160 5380 : mtstate->mt_partition_tuple_routing =
4161 5380 : ExecSetupPartitionTupleRouting(estate, rel);
4162 :
4163 : /*
4164 : * Initialize any WITH CHECK OPTION constraints if needed.
4165 : */
4166 102716 : resultRelInfo = mtstate->resultRelInfo;
4167 103978 : foreach(l, node->withCheckOptionLists)
4168 : {
4169 1262 : List *wcoList = (List *) lfirst(l);
4170 1262 : List *wcoExprs = NIL;
4171 : ListCell *ll;
4172 :
4173 3428 : foreach(ll, wcoList)
4174 : {
4175 2166 : WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
4176 2166 : ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
4177 : &mtstate->ps);
4178 :
4179 2166 : wcoExprs = lappend(wcoExprs, wcoExpr);
4180 : }
4181 :
4182 1262 : resultRelInfo->ri_WithCheckOptions = wcoList;
4183 1262 : resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
4184 1262 : resultRelInfo++;
4185 : }
4186 :
4187 : /*
4188 : * Initialize RETURNING projections if needed.
4189 : */
4190 102716 : if (node->returningLists)
4191 : {
4192 : TupleTableSlot *slot;
4193 : ExprContext *econtext;
4194 :
4195 : /*
4196 : * Initialize result tuple slot and assign its rowtype using the first
4197 : * RETURNING list. We assume the rest will look the same.
4198 : */
4199 3992 : mtstate->ps.plan->targetlist = (List *) linitial(node->returningLists);
4200 :
4201 : /* Set up a slot for the output of the RETURNING projection(s) */
4202 3992 : ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
4203 3992 : slot = mtstate->ps.ps_ResultTupleSlot;
4204 :
4205 : /* Need an econtext too */
4206 3992 : if (mtstate->ps.ps_ExprContext == NULL)
4207 3992 : ExecAssignExprContext(estate, &mtstate->ps);
4208 3992 : econtext = mtstate->ps.ps_ExprContext;
4209 :
4210 : /*
4211 : * Build a projection for each result rel.
4212 : */
4213 3992 : resultRelInfo = mtstate->resultRelInfo;
4214 8282 : foreach(l, node->returningLists)
4215 : {
4216 4290 : List *rlist = (List *) lfirst(l);
4217 :
4218 4290 : resultRelInfo->ri_returningList = rlist;
4219 4290 : resultRelInfo->ri_projectReturning =
4220 4290 : ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
4221 4290 : resultRelInfo->ri_RelationDesc->rd_att);
4222 4290 : resultRelInfo++;
4223 : }
4224 : }
4225 : else
4226 : {
4227 : /*
4228 : * We still must construct a dummy result tuple type, because InitPlan
4229 : * expects one (maybe should change that?).
4230 : */
4231 98724 : mtstate->ps.plan->targetlist = NIL;
4232 98724 : ExecInitResultTypeTL(&mtstate->ps);
4233 :
4234 98724 : mtstate->ps.ps_ExprContext = NULL;
4235 : }
4236 :
4237 : /* Set the list of arbiter indexes if needed for ON CONFLICT */
4238 102716 : resultRelInfo = mtstate->resultRelInfo;
4239 102716 : if (node->onConflictAction != ONCONFLICT_NONE)
4240 : {
4241 : /* insert may only have one relation, inheritance is not expanded */
4242 : Assert(nrels == 1);
4243 1188 : resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
4244 : }
4245 :
4246 : /*
4247 : * If needed, initialize the target list, projection and qual for ON CONFLICT
4248 : * DO UPDATE.
4249 : */
4250 102716 : if (node->onConflictAction == ONCONFLICT_UPDATE)
4251 : {
4252 900 : OnConflictSetState *onconfl = makeNode(OnConflictSetState);
4253 : ExprContext *econtext;
4254 : TupleDesc relationDesc;
4255 :
4256 : /* already exists if created by RETURNING processing above */
4257 900 : if (mtstate->ps.ps_ExprContext == NULL)
4258 632 : ExecAssignExprContext(estate, &mtstate->ps);
4259 :
4260 900 : econtext = mtstate->ps.ps_ExprContext;
4261 900 : relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
4262 :
4263 : /* create state for DO UPDATE SET operation */
4264 900 : resultRelInfo->ri_onConflict = onconfl;
4265 :
4266 : /* initialize slot for the existing tuple */
4267 900 : onconfl->oc_Existing =
4268 900 : table_slot_create(resultRelInfo->ri_RelationDesc,
4269 900 : &mtstate->ps.state->es_tupleTable);
4270 :
4271 : /*
4272 : * Create the tuple slot for the UPDATE SET projection. We want a slot
4273 : * of the table's type here, because the slot will be used to insert
4274 : * into the table, and for RETURNING processing - which may access
4275 : * system attributes.
4276 : */
4277 900 : onconfl->oc_ProjSlot =
4278 900 : table_slot_create(resultRelInfo->ri_RelationDesc,
4279 900 : &mtstate->ps.state->es_tupleTable);
4280 :
4281 : /* build UPDATE SET projection state */
4282 900 : onconfl->oc_ProjInfo =
4283 900 : ExecBuildUpdateProjection(node->onConflictSet,
4284 : true,
4285 : node->onConflictCols,
4286 : relationDesc,
4287 : econtext,
4288 : onconfl->oc_ProjSlot,
4289 : &mtstate->ps);
4290 :
4291 : /* initialize state to evaluate the WHERE clause, if any */
4292 900 : if (node->onConflictWhere)
4293 : {
4294 : ExprState *qualexpr;
4295 :
4296 176 : qualexpr = ExecInitQual((List *) node->onConflictWhere,
4297 : &mtstate->ps);
4298 176 : onconfl->oc_WhereClause = qualexpr;
4299 : }
4300 : }
4301 :
4302 : /*
4303 : * If we have any secondary relations in an UPDATE or DELETE, they need to
4304 : * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
4305 : * EvalPlanQual mechanism needs to be told about them. This also goes for
4306 : * the source relations in a MERGE. Locate the relevant ExecRowMarks.
4307 : */
4308 102716 : arowmarks = NIL;
4309 104888 : foreach(l, node->rowMarks)
4310 : {
4311 2172 : PlanRowMark *rc = lfirst_node(PlanRowMark, l);
4312 : ExecRowMark *erm;
4313 : ExecAuxRowMark *aerm;
4314 :
4315 : /* ignore "parent" rowmarks; they are irrelevant at runtime */
4316 2172 : if (rc->isParent)
4317 100 : continue;
4318 :
4319 : /* Find ExecRowMark and build ExecAuxRowMark */
4320 2072 : erm = ExecFindRowMark(estate, rc->rti, false);
4321 2072 : aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
4322 2072 : arowmarks = lappend(arowmarks, aerm);
4323 : }
4324 :
4325 : /* For a MERGE command, initialize its state */
4326 102716 : if (mtstate->operation == CMD_MERGE)
4327 914 : ExecInitMerge(mtstate, estate);
4328 :
4329 102716 : EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
4330 :
4331 : /*
4332 : * If there are a lot of result relations, use a hash table to speed the
4333 : * lookups. If there are not a lot, a simple linear search is faster.
4334 : *
4335 : * It's not clear where the threshold is, but try 64 for starters. In a
4336 : * debugging build, use a small threshold so that we get some test
4337 : * coverage of both code paths.
4338 : */
4339 : #ifdef USE_ASSERT_CHECKING
4340 : #define MT_NRELS_HASH 4
4341 : #else
4342 : #define MT_NRELS_HASH 64
4343 : #endif
4344 102716 : if (nrels >= MT_NRELS_HASH)
4345 : {
4346 : HASHCTL hash_ctl;
4347 :
4348 0 : hash_ctl.keysize = sizeof(Oid);
4349 0 : hash_ctl.entrysize = sizeof(MTTargetRelLookup);
4350 0 : hash_ctl.hcxt = CurrentMemoryContext;
4351 0 : mtstate->mt_resultOidHash =
4352 0 : hash_create("ModifyTable target hash",
4353 : nrels, &hash_ctl,
4354 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
4355 0 : for (i = 0; i < nrels; i++)
4356 : {
4357 : Oid hashkey;
4358 : MTTargetRelLookup *mtlookup;
4359 : bool found;
4360 :
4361 0 : resultRelInfo = &mtstate->resultRelInfo[i];
4362 0 : hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
4363 : mtlookup = (MTTargetRelLookup *)
4364 0 : hash_search(mtstate->mt_resultOidHash, &hashkey,
4365 : HASH_ENTER, &found);
4366 : Assert(!found);
4367 0 : mtlookup->relationIndex = i;
4368 : }
4369 : }
4370 : else
4371 102716 : mtstate->mt_resultOidHash = NULL;
4372 :
4373 : /*
4374 : * Determine if the FDW supports batch insert and determine the batch size
4375 : * (a FDW may support batching, but it may be disabled for the
4376 : * server/table).
4377 : *
4378 : * We only do this for INSERT, so that for UPDATE/DELETE the batch size
4379 : * remains set to 0.
4380 : */
4381 102716 : if (operation == CMD_INSERT)
4382 : {
4383 : /* insert may only have one relation, inheritance is not expanded */
4384 : Assert(nrels == 1);
4385 77120 : resultRelInfo = mtstate->resultRelInfo;
4386 77120 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4387 77120 : resultRelInfo->ri_FdwRoutine != NULL &&
4388 174 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
4389 174 : resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
4390 : {
4391 174 : resultRelInfo->ri_BatchSize =
4392 174 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
4393 174 : Assert(resultRelInfo->ri_BatchSize >= 1);
4394 : }
4395 : else
4396 76946 : resultRelInfo->ri_BatchSize = 1;
4397 : }
4398 :
4399 : /*
4400 : * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
4401 : * to estate->es_auxmodifytables so that it will be run to completion by
4402 : * ExecPostprocessPlan. (It'd actually work fine to add the primary
4403 : * ModifyTable node too, but there's no need.) Note the use of lcons not
4404 : * lappend: we need later-initialized ModifyTable nodes to be shut down
4405 : * before earlier ones. This ensures that we don't throw away RETURNING
4406 : * rows that need to be seen by a later CTE subplan.
4407 : */
4408 102716 : if (!mtstate->canSetTag)
4409 900 : estate->es_auxmodifytables = lcons(mtstate,
4410 : estate->es_auxmodifytables);
4411 :
4412 102716 : return mtstate;
4413 : }
4414 :
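/*
 * Illustrative, hypothetical sketch (standalone C, not PostgreSQL code):
 * the MT_NRELS_HASH logic above builds a hash table only when there are
 * many result relations, and deliberately lowers the threshold in
 * assert-enabled builds so that both lookup paths get regression
 * coverage.  The macro name and numbers below are invented for the
 * sketch.
 */
#include <stdio.h>

#ifdef SKETCH_DEBUG_BUILD
#define SKETCH_NRELS_HASH 4		/* small, to exercise the hash path */
#else
#define SKETCH_NRELS_HASH 64	/* rough crossover point, not tuned */
#endif

int
main(void)
{
	int			nrels = 5;

	if (nrels >= SKETCH_NRELS_HASH)
		printf("build a hash table over %d relations\n", nrels);
	else
		printf("linear search is fine for %d relations\n", nrels);
	return 0;
}
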
4415 : /* ----------------------------------------------------------------
4416 : * ExecEndModifyTable
4417 : *
4418 : * Shuts down the plan.
4419 : *
4420 : * Returns nothing of interest.
4421 : * ----------------------------------------------------------------
4422 : */
4423 : void
4424 99050 : ExecEndModifyTable(ModifyTableState *node)
4425 : {
4426 : int i;
4427 :
4428 : /*
4429 : * Allow any FDWs to shut down
4430 : */
4431 200016 : for (i = 0; i < node->mt_nrels; i++)
4432 : {
4433 : int j;
4434 100966 : ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
4435 :
4436 100966 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4437 100774 : resultRelInfo->ri_FdwRoutine != NULL &&
4438 286 : resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
4439 286 : resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
4440 : resultRelInfo);
4441 :
4442 : * Clean up the initialized batch slots. This only matters for FDWs
4443 : * Cleanup the initialized batch slots. This only matters for FDWs
4444 : * with batching, but the other cases will have ri_NumSlotsInitialized
4445 : * == 0.
4446 : */
4447 101022 : for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
4448 : {
4449 56 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
4450 56 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
4451 : }
4452 : }
4453 :
4454 : /*
4455 : * Close all the partitioned tables, leaf partitions, and their indices
4456 : * and release the slot used for tuple routing, if set.
4457 : */
4458 99050 : if (node->mt_partition_tuple_routing)
4459 : {
4460 5376 : ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
4461 :
4462 5376 : if (node->mt_root_tuple_slot)
4463 514 : ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
4464 : }
4465 :
4466 : /*
4467 : * Terminate EPQ execution if active
4468 : */
4469 99050 : EvalPlanQualEnd(&node->mt_epqstate);
4470 :
4471 : /*
4472 : * shut down subplan
4473 : */
4474 99050 : ExecEndNode(outerPlanState(node));
4475 99050 : }
4476 :
4477 : void
4478 0 : ExecReScanModifyTable(ModifyTableState *node)
4479 : {
4480 : /*
4481 : * Currently, we don't need to support rescan on ModifyTable nodes. The
4482 : * semantics of that would be a bit debatable anyway.
4483 : */
4484 0 : elog(ERROR, "ExecReScanModifyTable is not implemented");
4485 : }