Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeModifyTable.c
4 : * routines to handle ModifyTable nodes.
5 : *
6 : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/executor/nodeModifyTable.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : /* INTERFACE ROUTINES
16 : * ExecInitModifyTable - initialize the ModifyTable node
17 : * ExecModifyTable - retrieve the next tuple from the node
18 : * ExecEndModifyTable - shut down the ModifyTable node
19 : * ExecReScanModifyTable - rescan the ModifyTable node
20 : *
21 : * NOTES
22 : * The ModifyTable node receives input from its outerPlan, which is
23 : * the data to insert for INSERT cases, the changed columns' new
24 : * values plus row-locating info for UPDATE and MERGE cases, or just the
25 : * row-locating info for DELETE cases.
26 : *
27 : * MERGE runs a join between the source relation and the target table.
28 : * If any WHEN NOT MATCHED [BY TARGET] clauses are present, then the join
29 : * is an outer join that might output tuples without a matching target
30 : * tuple. In this case, any unmatched target tuples will have NULL
31 : * row-locating info, and only INSERT can be run. But for matched target
32 : * tuples, the row-locating info is used to determine the tuple to UPDATE
33 : * or DELETE. When all clauses are WHEN MATCHED or WHEN NOT MATCHED BY
34 : * SOURCE, all tuples produced by the join will include a matching target
35 : * tuple, so all tuples contain row-locating info.
36 : *
37 : * If the query specifies RETURNING, then the ModifyTable returns a
38 : * RETURNING tuple after completing each row insert, update, or delete.
39 : * It must be called again to continue the operation. Without RETURNING,
40 : * we just loop within the node until all the work is done, then
41 : * return NULL. This avoids useless call/return overhead.
42 : */
43 :
44 : #include "postgres.h"
45 :
46 : #include "access/htup_details.h"
47 : #include "access/tableam.h"
48 : #include "access/xact.h"
49 : #include "commands/trigger.h"
50 : #include "executor/execPartition.h"
51 : #include "executor/executor.h"
52 : #include "executor/nodeModifyTable.h"
53 : #include "foreign/fdwapi.h"
54 : #include "miscadmin.h"
55 : #include "nodes/nodeFuncs.h"
56 : #include "optimizer/optimizer.h"
57 : #include "rewrite/rewriteHandler.h"
58 : #include "storage/lmgr.h"
59 : #include "utils/builtins.h"
60 : #include "utils/datum.h"
61 : #include "utils/rel.h"
62 : #include "utils/snapmgr.h"
63 :
64 :
/*
 * Hash table entry used to map a result relation's OID to its position in
 * the ModifyTableState's resultRelInfo[] array.
 */
typedef struct MTTargetRelLookup
{
	Oid			relationOid;	/* hash key, must be first */
	int			relationIndex;	/* rel's index in resultRelInfo[] array */
} MTTargetRelLookup;
70 :
/*
 * Context struct for a ModifyTable operation, containing basic execution
 * state and some output variables populated by ExecUpdateAct() and
 * ExecDeleteAct() to report the result of their actions to callers.
 */
typedef struct ModifyTableContext
{
	/* Operation state */
	ModifyTableState *mtstate;
	EPQState   *epqstate;
	EState	   *estate;

	/*
	 * Slot containing tuple obtained from ModifyTable's subplan.  Used to
	 * access "junk" columns that are not going to be stored.
	 */
	TupleTableSlot *planSlot;

	/*
	 * Information about the changes that were made concurrently to a tuple
	 * being updated or deleted (output field, filled when a table-AM
	 * update/delete reports a concurrency failure)
	 */
	TM_FailureData tmfd;

	/*
	 * The tuple projected by the INSERT's RETURNING clause, when doing a
	 * cross-partition UPDATE
	 */
	TupleTableSlot *cpUpdateReturningSlot;
} ModifyTableContext;
101 :
/*
 * Context struct containing output data specific to UPDATE operations.
 */
typedef struct UpdateContext
{
	bool		crossPartUpdate;	/* was it a cross-partition update? */
	TU_UpdateIndexes updateIndexes; /* Which index updates are required? */

	/*
	 * Lock mode to acquire on the latest tuple version before performing
	 * EvalPlanQual on it
	 */
	LockTupleMode lockmode;
} UpdateContext;
116 :
117 :
118 : static void ExecBatchInsert(ModifyTableState *mtstate,
119 : ResultRelInfo *resultRelInfo,
120 : TupleTableSlot **slots,
121 : TupleTableSlot **planSlots,
122 : int numSlots,
123 : EState *estate,
124 : bool canSetTag);
125 : static void ExecPendingInserts(EState *estate);
126 : static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
127 : ResultRelInfo *sourcePartInfo,
128 : ResultRelInfo *destPartInfo,
129 : ItemPointer tupleid,
130 : TupleTableSlot *oldslot,
131 : TupleTableSlot *newslot);
132 : static bool ExecOnConflictUpdate(ModifyTableContext *context,
133 : ResultRelInfo *resultRelInfo,
134 : ItemPointer conflictTid,
135 : TupleTableSlot *excludedSlot,
136 : bool canSetTag,
137 : TupleTableSlot **returning);
138 : static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
139 : EState *estate,
140 : PartitionTupleRouting *proute,
141 : ResultRelInfo *targetRelInfo,
142 : TupleTableSlot *slot,
143 : ResultRelInfo **partRelInfo);
144 :
145 : static TupleTableSlot *ExecMerge(ModifyTableContext *context,
146 : ResultRelInfo *resultRelInfo,
147 : ItemPointer tupleid,
148 : HeapTuple oldtuple,
149 : bool canSetTag);
150 : static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
151 : static TupleTableSlot *ExecMergeMatched(ModifyTableContext *context,
152 : ResultRelInfo *resultRelInfo,
153 : ItemPointer tupleid,
154 : HeapTuple oldtuple,
155 : bool canSetTag,
156 : bool *matched);
157 : static TupleTableSlot *ExecMergeNotMatched(ModifyTableContext *context,
158 : ResultRelInfo *resultRelInfo,
159 : bool canSetTag);
160 :
161 :
162 : /*
163 : * Verify that the tuples to be produced by INSERT match the
164 : * target relation's rowtype
165 : *
166 : * We do this to guard against stale plans. If plan invalidation is
167 : * functioning properly then we should never get a failure here, but better
168 : * safe than sorry. Note that this is called after we have obtained lock
169 : * on the target rel, so the rowtype can't change underneath us.
170 : *
171 : * The plan output is represented by its targetlist, because that makes
172 : * handling the dropped-column case easier.
173 : *
174 : * We used to use this for UPDATE as well, but now the equivalent checks
175 : * are done in ExecBuildUpdateProjection.
176 : */
177 : static void
178 90680 : ExecCheckPlanOutput(Relation resultRel, List *targetList)
179 : {
180 90680 : TupleDesc resultDesc = RelationGetDescr(resultRel);
181 90680 : int attno = 0;
182 : ListCell *lc;
183 :
184 280028 : foreach(lc, targetList)
185 : {
186 189348 : TargetEntry *tle = (TargetEntry *) lfirst(lc);
187 : Form_pg_attribute attr;
188 :
189 : Assert(!tle->resjunk); /* caller removed junk items already */
190 :
191 189348 : if (attno >= resultDesc->natts)
192 0 : ereport(ERROR,
193 : (errcode(ERRCODE_DATATYPE_MISMATCH),
194 : errmsg("table row type and query-specified row type do not match"),
195 : errdetail("Query has too many columns.")));
196 189348 : attr = TupleDescAttr(resultDesc, attno);
197 189348 : attno++;
198 :
199 189348 : if (!attr->attisdropped)
200 : {
201 : /* Normal case: demand type match */
202 188738 : if (exprType((Node *) tle->expr) != attr->atttypid)
203 0 : ereport(ERROR,
204 : (errcode(ERRCODE_DATATYPE_MISMATCH),
205 : errmsg("table row type and query-specified row type do not match"),
206 : errdetail("Table has type %s at ordinal position %d, but query expects %s.",
207 : format_type_be(attr->atttypid),
208 : attno,
209 : format_type_be(exprType((Node *) tle->expr)))));
210 : }
211 : else
212 : {
213 : /*
214 : * For a dropped column, we can't check atttypid (it's likely 0).
215 : * In any case the planner has most likely inserted an INT4 null.
216 : * What we insist on is just *some* NULL constant.
217 : */
218 610 : if (!IsA(tle->expr, Const) ||
219 610 : !((Const *) tle->expr)->constisnull)
220 0 : ereport(ERROR,
221 : (errcode(ERRCODE_DATATYPE_MISMATCH),
222 : errmsg("table row type and query-specified row type do not match"),
223 : errdetail("Query provides a value for a dropped column at ordinal position %d.",
224 : attno)));
225 : }
226 : }
227 90680 : if (attno != resultDesc->natts)
228 0 : ereport(ERROR,
229 : (errcode(ERRCODE_DATATYPE_MISMATCH),
230 : errmsg("table row type and query-specified row type do not match"),
231 : errdetail("Query has too few columns.")));
232 90680 : }
233 :
234 : /*
235 : * ExecProcessReturning --- evaluate a RETURNING list
236 : *
237 : * resultRelInfo: current result rel
238 : * tupleSlot: slot holding tuple actually inserted/updated/deleted
239 : * planSlot: slot holding tuple returned by top subplan node
240 : *
241 : * Note: If tupleSlot is NULL, the FDW should have already provided econtext's
242 : * scan tuple.
243 : *
244 : * Returns a slot holding the result tuple
245 : */
246 : static TupleTableSlot *
247 7414 : ExecProcessReturning(ResultRelInfo *resultRelInfo,
248 : TupleTableSlot *tupleSlot,
249 : TupleTableSlot *planSlot)
250 : {
251 7414 : ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
252 7414 : ExprContext *econtext = projectReturning->pi_exprContext;
253 :
254 : /* Make tuple and any needed join variables available to ExecProject */
255 7414 : if (tupleSlot)
256 6720 : econtext->ecxt_scantuple = tupleSlot;
257 7414 : econtext->ecxt_outertuple = planSlot;
258 :
259 : /*
260 : * RETURNING expressions might reference the tableoid column, so
261 : * reinitialize tts_tableOid before evaluating them.
262 : */
263 7414 : econtext->ecxt_scantuple->tts_tableOid =
264 7414 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
265 :
266 : /* Compute the RETURNING expressions */
267 7414 : return ExecProject(projectReturning);
268 : }
269 :
270 : /*
271 : * ExecCheckTupleVisible -- verify tuple is visible
272 : *
273 : * It would not be consistent with guarantees of the higher isolation levels to
274 : * proceed with avoiding insertion (taking speculative insertion's alternative
275 : * path) on the basis of another tuple that is not visible to MVCC snapshot.
276 : * Check for the need to raise a serialization failure, and do so as necessary.
277 : */
278 : static void
279 5240 : ExecCheckTupleVisible(EState *estate,
280 : Relation rel,
281 : TupleTableSlot *slot)
282 : {
283 5240 : if (!IsolationUsesXactSnapshot())
284 5176 : return;
285 :
286 64 : if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
287 : {
288 : Datum xminDatum;
289 : TransactionId xmin;
290 : bool isnull;
291 :
292 40 : xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
293 : Assert(!isnull);
294 40 : xmin = DatumGetTransactionId(xminDatum);
295 :
296 : /*
297 : * We should not raise a serialization failure if the conflict is
298 : * against a tuple inserted by our own transaction, even if it's not
299 : * visible to our snapshot. (This would happen, for example, if
300 : * conflicting keys are proposed for insertion in a single command.)
301 : */
302 40 : if (!TransactionIdIsCurrentTransactionId(xmin))
303 20 : ereport(ERROR,
304 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
305 : errmsg("could not serialize access due to concurrent update")));
306 : }
307 : }
308 :
309 : /*
310 : * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
311 : */
312 : static void
313 158 : ExecCheckTIDVisible(EState *estate,
314 : ResultRelInfo *relinfo,
315 : ItemPointer tid,
316 : TupleTableSlot *tempSlot)
317 : {
318 158 : Relation rel = relinfo->ri_RelationDesc;
319 :
320 : /* Redundantly check isolation level */
321 158 : if (!IsolationUsesXactSnapshot())
322 94 : return;
323 :
324 64 : if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
325 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
326 64 : ExecCheckTupleVisible(estate, rel, tempSlot);
327 44 : ExecClearTuple(tempSlot);
328 : }
329 :
/*
 * Initialize to compute stored generated columns for a tuple
 *
 * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI
 * or ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
 * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
 *
 * Note: usually, a given query would need only one of ri_GeneratedExprsI and
 * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
 * cross-partition UPDATEs, since a partition might be the target of both
 * UPDATE and INSERT actions.
 */
void
ExecInitStoredGenerated(ResultRelInfo *resultRelInfo,
						EState *estate,
						CmdType cmdtype)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	int			natts = tupdesc->natts;
	ExprState **ri_GeneratedExprs;
	int			ri_NumGeneratedNeeded;
	Bitmapset  *updatedCols;
	MemoryContext oldContext;

	/* Nothing to do if no generated columns */
	if (!(tupdesc->constr && tupdesc->constr->has_generated_stored))
		return;

	/*
	 * In an UPDATE, we can skip computing any generated columns that do not
	 * depend on any UPDATE target column.  But if there is a BEFORE ROW
	 * UPDATE trigger, we cannot skip because the trigger might change more
	 * columns.
	 */
	if (cmdtype == CMD_UPDATE &&
		!(rel->trigdesc && rel->trigdesc->trig_update_before_row))
		updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
	else
		updatedCols = NULL;		/* NULL means "compute all generated cols" */

	/*
	 * Make sure these data structures are built in the per-query memory
	 * context so they'll survive throughout the query.
	 */
	oldContext = MemoryContextSwitchTo(estate->es_query_cxt);

	/* One (possibly NULL) ExprState per attribute, indexed by attnum - 1 */
	ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
	ri_NumGeneratedNeeded = 0;

	for (int i = 0; i < natts; i++)
	{
		if (TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED)
		{
			Expr	   *expr;

			/* Fetch the GENERATED AS expression tree */
			expr = (Expr *) build_column_default(rel, i + 1);
			if (expr == NULL)
				elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
					 i + 1, RelationGetRelationName(rel));

			/*
			 * If it's an update with a known set of update target columns,
			 * see if we can skip the computation.
			 */
			if (updatedCols)
			{
				Bitmapset  *attrs_used = NULL;

				pull_varattnos((Node *) expr, 1, &attrs_used);

				if (!bms_overlap(updatedCols, attrs_used))
					continue;	/* need not update this column */
			}

			/* No luck, so prepare the expression for execution */
			ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
			ri_NumGeneratedNeeded++;

			/* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
			if (cmdtype == CMD_UPDATE)
				resultRelInfo->ri_extraUpdatedCols =
					bms_add_member(resultRelInfo->ri_extraUpdatedCols,
								   i + 1 - FirstLowInvalidHeapAttributeNumber);
		}
	}

	/* Save in appropriate set of fields */
	if (cmdtype == CMD_UPDATE)
	{
		/* Don't call twice */
		Assert(resultRelInfo->ri_GeneratedExprsU == NULL);

		resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
		resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;
	}
	else
	{
		/* Don't call twice */
		Assert(resultRelInfo->ri_GeneratedExprsI == NULL);

		resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
		resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
	}

	MemoryContextSwitchTo(oldContext);
}
438 :
/*
 * Compute stored generated columns for a tuple
 *
 * Evaluates the prepared GENERATED AS expressions (initializing them on
 * first use via ExecInitStoredGenerated) and overwrites the corresponding
 * columns of 'slot' in place, leaving the slot materialized.
 */
void
ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
						   EState *estate, TupleTableSlot *slot,
						   CmdType cmdtype)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	int			natts = tupdesc->natts;
	ExprContext *econtext = GetPerTupleExprContext(estate);
	ExprState **ri_GeneratedExprs;
	MemoryContext oldContext;
	Datum	   *values;
	bool	   *nulls;

	/* We should not be called unless this is true */
	Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);

	/*
	 * Initialize the expressions if we didn't already, and check whether we
	 * can exit early because nothing needs to be computed.
	 */
	if (cmdtype == CMD_UPDATE)
	{
		if (resultRelInfo->ri_GeneratedExprsU == NULL)
			ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
		if (resultRelInfo->ri_NumGeneratedNeededU == 0)
			return;
		ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
	}
	else
	{
		if (resultRelInfo->ri_GeneratedExprsI == NULL)
			ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
		/* Early exit is impossible given the prior Assert */
		Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
		ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
	}

	/* Build the new column values in per-tuple memory */
	oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));

	values = palloc(sizeof(*values) * natts);
	nulls = palloc(sizeof(*nulls) * natts);

	slot_getallattrs(slot);
	memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);

	for (int i = 0; i < natts; i++)
	{
		Form_pg_attribute attr = TupleDescAttr(tupdesc, i);

		if (ri_GeneratedExprs[i])
		{
			Datum		val;
			bool		isnull;

			Assert(attr->attgenerated == ATTRIBUTE_GENERATED_STORED);

			econtext->ecxt_scantuple = slot;

			val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);

			/*
			 * We must make a copy of val as we have no guarantees about where
			 * memory for a pass-by-reference Datum is located.
			 */
			if (!isnull)
				val = datumCopy(val, attr->attbyval, attr->attlen);

			values[i] = val;
			nulls[i] = isnull;
		}
		else
		{
			/* Not generated (or not needed): copy the existing value */
			if (!nulls[i])
				values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
		}
	}

	/* Replace the slot's contents with the computed row */
	ExecClearTuple(slot);
	memcpy(slot->tts_values, values, sizeof(*values) * natts);
	memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
	ExecStoreVirtualTuple(slot);
	ExecMaterializeSlot(slot);

	MemoryContextSwitchTo(oldContext);
}
528 :
529 : /*
530 : * ExecInitInsertProjection
531 : * Do one-time initialization of projection data for INSERT tuples.
532 : *
533 : * INSERT queries may need a projection to filter out junk attrs in the tlist.
534 : *
535 : * This is also a convenient place to verify that the
536 : * output of an INSERT matches the target table.
537 : */
538 : static void
539 89716 : ExecInitInsertProjection(ModifyTableState *mtstate,
540 : ResultRelInfo *resultRelInfo)
541 : {
542 89716 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
543 89716 : Plan *subplan = outerPlan(node);
544 89716 : EState *estate = mtstate->ps.state;
545 89716 : List *insertTargetList = NIL;
546 89716 : bool need_projection = false;
547 : ListCell *l;
548 :
549 : /* Extract non-junk columns of the subplan's result tlist. */
550 276618 : foreach(l, subplan->targetlist)
551 : {
552 186902 : TargetEntry *tle = (TargetEntry *) lfirst(l);
553 :
554 186902 : if (!tle->resjunk)
555 186902 : insertTargetList = lappend(insertTargetList, tle);
556 : else
557 0 : need_projection = true;
558 : }
559 :
560 : /*
561 : * The junk-free list must produce a tuple suitable for the result
562 : * relation.
563 : */
564 89716 : ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);
565 :
566 : /* We'll need a slot matching the table's format. */
567 89716 : resultRelInfo->ri_newTupleSlot =
568 89716 : table_slot_create(resultRelInfo->ri_RelationDesc,
569 : &estate->es_tupleTable);
570 :
571 : /* Build ProjectionInfo if needed (it probably isn't). */
572 89716 : if (need_projection)
573 : {
574 0 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
575 :
576 : /* need an expression context to do the projection */
577 0 : if (mtstate->ps.ps_ExprContext == NULL)
578 0 : ExecAssignExprContext(estate, &mtstate->ps);
579 :
580 0 : resultRelInfo->ri_projectNew =
581 0 : ExecBuildProjectionInfo(insertTargetList,
582 : mtstate->ps.ps_ExprContext,
583 : resultRelInfo->ri_newTupleSlot,
584 : &mtstate->ps,
585 : relDesc);
586 : }
587 :
588 89716 : resultRelInfo->ri_projectNewInfoValid = true;
589 89716 : }
590 :
591 : /*
592 : * ExecInitUpdateProjection
593 : * Do one-time initialization of projection data for UPDATE tuples.
594 : *
595 : * UPDATE always needs a projection, because (1) there's always some junk
596 : * attrs, and (2) we may need to merge values of not-updated columns from
597 : * the old tuple into the final tuple. In UPDATE, the tuple arriving from
598 : * the subplan contains only new values for the changed columns, plus row
599 : * identity info in the junk attrs.
600 : *
601 : * This is "one-time" for any given result rel, but we might touch more than
602 : * one result rel in the course of an inherited UPDATE, and each one needs
603 : * its own projection due to possible column order variation.
604 : *
605 : * This is also a convenient place to verify that the output of an UPDATE
606 : * matches the target table (ExecBuildUpdateProjection does that).
607 : */
608 : static void
609 12748 : ExecInitUpdateProjection(ModifyTableState *mtstate,
610 : ResultRelInfo *resultRelInfo)
611 : {
612 12748 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
613 12748 : Plan *subplan = outerPlan(node);
614 12748 : EState *estate = mtstate->ps.state;
615 12748 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
616 : int whichrel;
617 : List *updateColnos;
618 :
619 : /*
620 : * Usually, mt_lastResultIndex matches the target rel. If it happens not
621 : * to, we can get the index the hard way with an integer division.
622 : */
623 12748 : whichrel = mtstate->mt_lastResultIndex;
624 12748 : if (resultRelInfo != mtstate->resultRelInfo + whichrel)
625 : {
626 0 : whichrel = resultRelInfo - mtstate->resultRelInfo;
627 : Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
628 : }
629 :
630 12748 : updateColnos = (List *) list_nth(node->updateColnosLists, whichrel);
631 :
632 : /*
633 : * For UPDATE, we use the old tuple to fill up missing values in the tuple
634 : * produced by the subplan to get the new tuple. We need two slots, both
635 : * matching the table's desired format.
636 : */
637 12748 : resultRelInfo->ri_oldTupleSlot =
638 12748 : table_slot_create(resultRelInfo->ri_RelationDesc,
639 : &estate->es_tupleTable);
640 12748 : resultRelInfo->ri_newTupleSlot =
641 12748 : table_slot_create(resultRelInfo->ri_RelationDesc,
642 : &estate->es_tupleTable);
643 :
644 : /* need an expression context to do the projection */
645 12748 : if (mtstate->ps.ps_ExprContext == NULL)
646 11564 : ExecAssignExprContext(estate, &mtstate->ps);
647 :
648 12748 : resultRelInfo->ri_projectNew =
649 12748 : ExecBuildUpdateProjection(subplan->targetlist,
650 : false, /* subplan did the evaluation */
651 : updateColnos,
652 : relDesc,
653 : mtstate->ps.ps_ExprContext,
654 : resultRelInfo->ri_newTupleSlot,
655 : &mtstate->ps);
656 :
657 12748 : resultRelInfo->ri_projectNewInfoValid = true;
658 12748 : }
659 :
660 : /*
661 : * ExecGetInsertNewTuple
662 : * This prepares a "new" tuple ready to be inserted into given result
663 : * relation, by removing any junk columns of the plan's output tuple
664 : * and (if necessary) coercing the tuple to the right tuple format.
665 : */
666 : static TupleTableSlot *
667 11201154 : ExecGetInsertNewTuple(ResultRelInfo *relinfo,
668 : TupleTableSlot *planSlot)
669 : {
670 11201154 : ProjectionInfo *newProj = relinfo->ri_projectNew;
671 : ExprContext *econtext;
672 :
673 : /*
674 : * If there's no projection to be done, just make sure the slot is of the
675 : * right type for the target rel. If the planSlot is the right type we
676 : * can use it as-is, else copy the data into ri_newTupleSlot.
677 : */
678 11201154 : if (newProj == NULL)
679 : {
680 11201154 : if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
681 : {
682 10429214 : ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
683 10429214 : return relinfo->ri_newTupleSlot;
684 : }
685 : else
686 771940 : return planSlot;
687 : }
688 :
689 : /*
690 : * Else project; since the projection output slot is ri_newTupleSlot, this
691 : * will also fix any slot-type problem.
692 : *
693 : * Note: currently, this is dead code, because INSERT cases don't receive
694 : * any junk columns so there's never a projection to be done.
695 : */
696 0 : econtext = newProj->pi_exprContext;
697 0 : econtext->ecxt_outertuple = planSlot;
698 0 : return ExecProject(newProj);
699 : }
700 :
701 : /*
702 : * ExecGetUpdateNewTuple
703 : * This prepares a "new" tuple by combining an UPDATE subplan's output
704 : * tuple (which contains values of changed columns) with unchanged
705 : * columns taken from the old tuple.
706 : *
707 : * The subplan tuple might also contain junk columns, which are ignored.
708 : * Note that the projection also ensures we have a slot of the right type.
709 : */
710 : TupleTableSlot *
711 307142 : ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
712 : TupleTableSlot *planSlot,
713 : TupleTableSlot *oldSlot)
714 : {
715 307142 : ProjectionInfo *newProj = relinfo->ri_projectNew;
716 : ExprContext *econtext;
717 :
718 : /* Use a few extra Asserts to protect against outside callers */
719 : Assert(relinfo->ri_projectNewInfoValid);
720 : Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
721 : Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));
722 :
723 307142 : econtext = newProj->pi_exprContext;
724 307142 : econtext->ecxt_outertuple = planSlot;
725 307142 : econtext->ecxt_scantuple = oldSlot;
726 307142 : return ExecProject(newProj);
727 : }
728 :
729 : /* ----------------------------------------------------------------
730 : * ExecInsert
731 : *
732 : * For INSERT, we have to insert the tuple into the target relation
733 : * (or partition thereof) and insert appropriate tuples into the index
734 : * relations.
735 : *
736 : * slot contains the new tuple value to be stored.
737 : *
738 : * Returns RETURNING result if any, otherwise NULL.
739 : * *inserted_tuple is the tuple that's effectively inserted;
740 : * *insert_destrel is the relation where it was inserted.
741 : * These are only set on success.
742 : *
743 : * This may change the currently active tuple conversion map in
744 : * mtstate->mt_transition_capture, so the callers must take care to
745 : * save the previous value to avoid losing track of it.
746 : * ----------------------------------------------------------------
747 : */
748 : static TupleTableSlot *
749 11203812 : ExecInsert(ModifyTableContext *context,
750 : ResultRelInfo *resultRelInfo,
751 : TupleTableSlot *slot,
752 : bool canSetTag,
753 : TupleTableSlot **inserted_tuple,
754 : ResultRelInfo **insert_destrel)
755 : {
756 11203812 : ModifyTableState *mtstate = context->mtstate;
757 11203812 : EState *estate = context->estate;
758 : Relation resultRelationDesc;
759 11203812 : List *recheckIndexes = NIL;
760 11203812 : TupleTableSlot *planSlot = context->planSlot;
761 11203812 : TupleTableSlot *result = NULL;
762 : TransitionCaptureState *ar_insert_trig_tcs;
763 11203812 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
764 11203812 : OnConflictAction onconflict = node->onConflictAction;
765 11203812 : PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
766 : MemoryContext oldContext;
767 :
768 : /*
769 : * If the input result relation is a partitioned table, find the leaf
770 : * partition to insert the tuple into.
771 : */
772 11203812 : if (proute)
773 : {
774 : ResultRelInfo *partRelInfo;
775 :
776 723086 : slot = ExecPrepareTupleRouting(mtstate, estate, proute,
777 : resultRelInfo, slot,
778 : &partRelInfo);
779 722882 : resultRelInfo = partRelInfo;
780 : }
781 :
782 11203608 : ExecMaterializeSlot(slot);
783 :
784 11203608 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
785 :
786 : /*
787 : * Open the table's indexes, if we have not done so already, so that we
788 : * can add new index entries for the inserted tuple.
789 : */
790 11203608 : if (resultRelationDesc->rd_rel->relhasindex &&
791 2811546 : resultRelInfo->ri_IndexRelationDescs == NULL)
792 29280 : ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);
793 :
794 : /*
795 : * BEFORE ROW INSERT Triggers.
796 : *
797 : * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
798 : * INSERT ... ON CONFLICT statement. We cannot check for constraint
799 : * violations before firing these triggers, because they can change the
800 : * values to insert. Also, they can run arbitrary user-defined code with
801 : * side-effects that we can't cancel by just not inserting the tuple.
802 : */
803 11203608 : if (resultRelInfo->ri_TrigDesc &&
804 75214 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)
805 : {
806 : /* Flush any pending inserts, so rows are visible to the triggers */
807 2120 : if (estate->es_insert_pending_result_relations != NIL)
808 6 : ExecPendingInserts(estate);
809 :
810 2120 : if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
811 200 : return NULL; /* "do nothing" */
812 : }
813 :
814 : /* INSTEAD OF ROW INSERT Triggers */
815 11203292 : if (resultRelInfo->ri_TrigDesc &&
816 74898 : resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
817 : {
818 168 : if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
819 6 : return NULL; /* "do nothing" */
820 : }
821 11203124 : else if (resultRelInfo->ri_FdwRoutine)
822 : {
823 : /*
824 : * GENERATED expressions might reference the tableoid column, so
825 : * (re-)initialize tts_tableOid before evaluating them.
826 : */
827 2014 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
828 :
829 : /*
830 : * Compute stored generated columns
831 : */
832 2014 : if (resultRelationDesc->rd_att->constr &&
833 366 : resultRelationDesc->rd_att->constr->has_generated_stored)
834 8 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
835 : CMD_INSERT);
836 :
837 : /*
838 : * If the FDW supports batching, and batching is requested, accumulate
839 : * rows and insert them in batches. Otherwise use the per-row inserts.
840 : */
841 2014 : if (resultRelInfo->ri_BatchSize > 1)
842 : {
843 288 : bool flushed = false;
844 :
845 : /*
846 : * When we've reached the desired batch size, perform the
847 : * insertion.
848 : */
849 288 : if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
850 : {
851 20 : ExecBatchInsert(mtstate, resultRelInfo,
852 : resultRelInfo->ri_Slots,
853 : resultRelInfo->ri_PlanSlots,
854 : resultRelInfo->ri_NumSlots,
855 : estate, canSetTag);
856 20 : flushed = true;
857 : }
858 :
859 288 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
860 :
861 288 : if (resultRelInfo->ri_Slots == NULL)
862 : {
863 56 : resultRelInfo->ri_Slots = palloc(sizeof(TupleTableSlot *) *
864 28 : resultRelInfo->ri_BatchSize);
865 28 : resultRelInfo->ri_PlanSlots = palloc(sizeof(TupleTableSlot *) *
866 28 : resultRelInfo->ri_BatchSize);
867 : }
868 :
869 : /*
870 : * Initialize the batch slots. We don't know how many slots will
871 : * be needed, so we initialize them as the batch grows, and we
872 : * keep them across batches. To mitigate an inefficiency in how
873 : * resource owner handles objects with many references (as with
874 : * many slots all referencing the same tuple descriptor) we copy
875 : * the appropriate tuple descriptor for each slot.
876 : */
877 288 : if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
878 : {
879 142 : TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
880 : TupleDesc plan_tdesc =
881 142 : CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
882 :
883 284 : resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
884 142 : MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
885 :
886 284 : resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
887 142 : MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
888 :
889 : /* remember how many batch slots we initialized */
890 142 : resultRelInfo->ri_NumSlotsInitialized++;
891 : }
892 :
893 288 : ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
894 : slot);
895 :
896 288 : ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
897 : planSlot);
898 :
899 : /*
900 : * If these are the first tuples stored in the buffers, add the
901 : * target rel and the mtstate to the
902 : * es_insert_pending_result_relations and
903 : * es_insert_pending_modifytables lists respectively, except in
904 : * the case where flushing was done above, in which case they
905 : * would already have been added to the lists, so no need to do
906 : * this.
907 : */
908 288 : if (resultRelInfo->ri_NumSlots == 0 && !flushed)
909 : {
910 : Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
911 : resultRelInfo));
912 36 : estate->es_insert_pending_result_relations =
913 36 : lappend(estate->es_insert_pending_result_relations,
914 : resultRelInfo);
915 36 : estate->es_insert_pending_modifytables =
916 36 : lappend(estate->es_insert_pending_modifytables, mtstate);
917 : }
918 : Assert(list_member_ptr(estate->es_insert_pending_result_relations,
919 : resultRelInfo));
920 :
921 288 : resultRelInfo->ri_NumSlots++;
922 :
923 288 : MemoryContextSwitchTo(oldContext);
924 :
925 288 : return NULL;
926 : }
927 :
928 : /*
929 : * insert into foreign table: let the FDW do it
930 : */
931 1726 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
932 : resultRelInfo,
933 : slot,
934 : planSlot);
935 :
936 1720 : if (slot == NULL) /* "do nothing" */
937 4 : return NULL;
938 :
939 : /*
940 : * AFTER ROW Triggers or RETURNING expressions might reference the
941 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
942 : * them. (This covers the case where the FDW replaced the slot.)
943 : */
944 1716 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
945 : }
946 : else
947 : {
948 : WCOKind wco_kind;
949 :
950 : /*
951 : * Constraints and GENERATED expressions might reference the tableoid
952 : * column, so (re-)initialize tts_tableOid before evaluating them.
953 : */
954 11201110 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
955 :
956 : /*
957 : * Compute stored generated columns
958 : */
959 11201110 : if (resultRelationDesc->rd_att->constr &&
960 3036718 : resultRelationDesc->rd_att->constr->has_generated_stored)
961 1038 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
962 : CMD_INSERT);
963 :
964 : /*
965 : * Check any RLS WITH CHECK policies.
966 : *
967 : * Normally we should check INSERT policies. But if the insert is the
968 : * result of a partition key update that moved the tuple to a new
969 : * partition, we should instead check UPDATE policies, because we are
970 : * executing policies defined on the target table, and not those
971 : * defined on the child partitions.
972 : *
973 : * If we're running MERGE, we refer to the action that we're executing
974 : * to know if we're doing an INSERT or UPDATE to a partition table.
975 : */
976 11201098 : if (mtstate->operation == CMD_UPDATE)
977 710 : wco_kind = WCO_RLS_UPDATE_CHECK;
978 11200388 : else if (mtstate->operation == CMD_MERGE)
979 1708 : wco_kind = (mtstate->mt_merge_action->mas_action->commandType == CMD_UPDATE) ?
980 1708 : WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
981 : else
982 11198680 : wco_kind = WCO_RLS_INSERT_CHECK;
983 :
984 : /*
985 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
986 : * we are looking for at this point.
987 : */
988 11201098 : if (resultRelInfo->ri_WithCheckOptions != NIL)
989 552 : ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
990 :
991 : /*
992 : * Check the constraints of the tuple.
993 : */
994 11200924 : if (resultRelationDesc->rd_att->constr)
995 3036622 : ExecConstraints(resultRelInfo, slot, estate);
996 :
997 : /*
998 : * Also check the tuple against the partition constraint, if there is
999 : * one; except that if we got here via tuple-routing, we don't need to
1000 : * if there's no BR trigger defined on the partition.
1001 : */
1002 11200272 : if (resultRelationDesc->rd_rel->relispartition &&
1003 727134 : (resultRelInfo->ri_RootResultRelInfo == NULL ||
1004 722298 : (resultRelInfo->ri_TrigDesc &&
1005 1520 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
1006 5032 : ExecPartitionCheck(resultRelInfo, slot, estate, true);
1007 :
1008 11200104 : if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
1009 4010 : {
1010 : /* Perform a speculative insertion. */
1011 : uint32 specToken;
1012 : ItemPointerData conflictTid;
1013 : bool specConflict;
1014 : List *arbiterIndexes;
1015 :
1016 9380 : arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
1017 :
1018 : /*
1019 : * Do a non-conclusive check for conflicts first.
1020 : *
1021 : * We're not holding any locks yet, so this doesn't guarantee that
1022 : * the later insert won't conflict. But it avoids leaving behind
1023 : * a lot of canceled speculative insertions, if you run a lot of
1024 : * INSERT ON CONFLICT statements that do conflict.
1025 : *
1026 : * We loop back here if we find a conflict below, either during
1027 : * the pre-check, or when we re-check after inserting the tuple
1028 : * speculatively. Better allow interrupts in case some bug makes
1029 : * this an infinite loop.
1030 : */
1031 9390 : vlock:
1032 9390 : CHECK_FOR_INTERRUPTS();
1033 9390 : specConflict = false;
1034 9390 : if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
1035 : &conflictTid, arbiterIndexes))
1036 : {
1037 : /* committed conflict tuple found */
1038 5358 : if (onconflict == ONCONFLICT_UPDATE)
1039 : {
1040 : /*
1041 : * In case of ON CONFLICT DO UPDATE, execute the UPDATE
1042 : * part. Be prepared to retry if the UPDATE fails because
1043 : * of another concurrent UPDATE/DELETE to the conflict
1044 : * tuple.
1045 : */
1046 5200 : TupleTableSlot *returning = NULL;
1047 :
1048 5200 : if (ExecOnConflictUpdate(context, resultRelInfo,
1049 : &conflictTid, slot, canSetTag,
1050 : &returning))
1051 : {
1052 5122 : InstrCountTuples2(&mtstate->ps, 1);
1053 5122 : return returning;
1054 : }
1055 : else
1056 0 : goto vlock;
1057 : }
1058 : else
1059 : {
1060 : /*
1061 : * In case of ON CONFLICT DO NOTHING, do nothing. However,
1062 : * verify that the tuple is visible to the executor's MVCC
1063 : * snapshot at higher isolation levels.
1064 : *
1065 : * Using ExecGetReturningSlot() to store the tuple for the
1066 : * recheck isn't that pretty, but we can't trivially use
1067 : * the input slot, because it might not be of a compatible
1068 : * type. As there's no conflicting usage of
1069 : * ExecGetReturningSlot() in the DO NOTHING case...
1070 : */
1071 : Assert(onconflict == ONCONFLICT_NOTHING);
1072 158 : ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
1073 : ExecGetReturningSlot(estate, resultRelInfo));
1074 138 : InstrCountTuples2(&mtstate->ps, 1);
1075 138 : return NULL;
1076 : }
1077 : }
1078 :
1079 : /*
1080 : * Before we start insertion proper, acquire our "speculative
1081 : * insertion lock". Others can use that to wait for us to decide
1082 : * if we're going to go ahead with the insertion, instead of
1083 : * waiting for the whole transaction to complete.
1084 : */
1085 4026 : specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
1086 :
1087 : /* insert the tuple, with the speculative token */
1088 4026 : table_tuple_insert_speculative(resultRelationDesc, slot,
1089 : estate->es_output_cid,
1090 : 0,
1091 : NULL,
1092 : specToken);
1093 :
1094 : /* insert index entries for tuple */
1095 4026 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1096 : slot, estate, false, true,
1097 : &specConflict,
1098 : arbiterIndexes,
1099 : false);
1100 :
1101 : /* adjust the tuple's state accordingly */
1102 4020 : table_tuple_complete_speculative(resultRelationDesc, slot,
1103 4020 : specToken, !specConflict);
1104 :
1105 : /*
1106 : * Wake up anyone waiting for our decision. They will re-check
1107 : * the tuple, see that it's no longer speculative, and wait on our
1108 : * XID as if this was a regularly inserted tuple all along. Or if
1109 : * we killed the tuple, they will see it's dead, and proceed as if
1110 : * the tuple never existed.
1111 : */
1112 4020 : SpeculativeInsertionLockRelease(GetCurrentTransactionId());
1113 :
1114 : /*
1115 : * If there was a conflict, start from the beginning. We'll do
1116 : * the pre-check again, which will now find the conflicting tuple
1117 : * (unless it aborts before we get there).
1118 : */
1119 4020 : if (specConflict)
1120 : {
1121 10 : list_free(recheckIndexes);
1122 10 : goto vlock;
1123 : }
1124 :
1125 : /* Since there was no insertion conflict, we're done */
1126 : }
1127 : else
1128 : {
1129 : /* insert the tuple normally */
1130 11190724 : table_tuple_insert(resultRelationDesc, slot,
1131 : estate->es_output_cid,
1132 : 0, NULL);
1133 :
1134 : /* insert index entries for tuple */
1135 11190700 : if (resultRelInfo->ri_NumIndices > 0)
1136 2801488 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1137 : slot, estate, false,
1138 : false, NULL, NIL,
1139 : false);
1140 : }
1141 : }
1142 :
1143 11196050 : if (canSetTag)
1144 11194878 : (estate->es_processed)++;
1145 :
1146 : /*
1147 : * If this insert is the result of a partition key update that moved the
1148 : * tuple to a new partition, put this row into the transition NEW TABLE,
1149 : * if there is one. We need to do this separately for DELETE and INSERT
1150 : * because they happen on different tables.
1151 : */
1152 11196050 : ar_insert_trig_tcs = mtstate->mt_transition_capture;
1153 11196050 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
1154 42 : && mtstate->mt_transition_capture->tcs_update_new_table)
1155 : {
1156 42 : ExecARUpdateTriggers(estate, resultRelInfo,
1157 : NULL, NULL,
1158 : NULL,
1159 : NULL,
1160 : slot,
1161 : NULL,
1162 42 : mtstate->mt_transition_capture,
1163 : false);
1164 :
1165 : /*
1166 : * We've already captured the NEW TABLE row, so make sure any AR
1167 : * INSERT trigger fired below doesn't capture it again.
1168 : */
1169 42 : ar_insert_trig_tcs = NULL;
1170 : }
1171 :
1172 : /* AFTER ROW INSERT Triggers */
1173 11196050 : ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
1174 : ar_insert_trig_tcs);
1175 :
1176 11196050 : list_free(recheckIndexes);
1177 :
1178 : /*
1179 : * Check any WITH CHECK OPTION constraints from parent views. We are
1180 : * required to do this after testing all constraints and uniqueness
1181 : * violations per the SQL spec, so we do it after actually inserting the
1182 : * record into the heap and all indexes.
1183 : *
1184 : * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
1185 : * tuple will never be seen, if it violates the WITH CHECK OPTION.
1186 : *
1187 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1188 : * are looking for at this point.
1189 : */
1190 11196050 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1191 364 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1192 :
1193 : /* Process RETURNING if present */
1194 11195904 : if (resultRelInfo->ri_projectReturning)
1195 3464 : result = ExecProcessReturning(resultRelInfo, slot, planSlot);
1196 :
1197 11195892 : if (inserted_tuple)
1198 736 : *inserted_tuple = slot;
1199 11195892 : if (insert_destrel)
1200 736 : *insert_destrel = resultRelInfo;
1201 :
1202 11195892 : return result;
1203 : }
1204 :
1205 : /* ----------------------------------------------------------------
1206 : * ExecBatchInsert
1207 : *
1208 : * Insert multiple tuples in an efficient way.
1209 : * Currently, this handles inserting into a foreign table without
1210 : * RETURNING clause.
1211 : * ----------------------------------------------------------------
1212 : */
1213 : static void
1214 56 : ExecBatchInsert(ModifyTableState *mtstate,
1215 : ResultRelInfo *resultRelInfo,
1216 : TupleTableSlot **slots,
1217 : TupleTableSlot **planSlots,
1218 : int numSlots,
1219 : EState *estate,
1220 : bool canSetTag)
1221 : {
1222 : int i;
1223 56 : int numInserted = numSlots;
1224 56 : TupleTableSlot *slot = NULL;
1225 : TupleTableSlot **rslots;
1226 :
1227 : /*
1228 : * insert into foreign table: let the FDW do it
1229 : */
1230 56 : rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1231 : resultRelInfo,
1232 : slots,
1233 : planSlots,
1234 : &numInserted);
1235 :
1236 344 : for (i = 0; i < numInserted; i++)
1237 : {
1238 288 : slot = rslots[i];
1239 :
1240 : /*
1241 : * AFTER ROW Triggers might reference the tableoid column, so
1242 : * (re-)initialize tts_tableOid before evaluating them.
1243 : */
1244 288 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1245 :
1246 : /* AFTER ROW INSERT Triggers */
1247 288 : ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1248 288 : mtstate->mt_transition_capture);
1249 :
1250 : /*
1251 : * Check any WITH CHECK OPTION constraints from parent views. See the
1252 : * comment in ExecInsert.
1253 : */
1254 288 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1255 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1256 : }
1257 :
1258 56 : if (canSetTag && numInserted > 0)
1259 56 : estate->es_processed += numInserted;
1260 :
1261 : /* Clean up all the slots, ready for the next batch */
1262 344 : for (i = 0; i < numSlots; i++)
1263 : {
1264 288 : ExecClearTuple(slots[i]);
1265 288 : ExecClearTuple(planSlots[i]);
1266 : }
1267 56 : resultRelInfo->ri_NumSlots = 0;
1268 56 : }
1269 :
1270 : /*
1271 : * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1272 : */
1273 : static void
1274 34 : ExecPendingInserts(EState *estate)
1275 : {
1276 : ListCell *l1,
1277 : *l2;
1278 :
1279 70 : forboth(l1, estate->es_insert_pending_result_relations,
1280 : l2, estate->es_insert_pending_modifytables)
1281 : {
1282 36 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1283 36 : ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1284 :
1285 : Assert(mtstate);
1286 36 : ExecBatchInsert(mtstate, resultRelInfo,
1287 : resultRelInfo->ri_Slots,
1288 : resultRelInfo->ri_PlanSlots,
1289 : resultRelInfo->ri_NumSlots,
1290 36 : estate, mtstate->canSetTag);
1291 : }
1292 :
1293 34 : list_free(estate->es_insert_pending_result_relations);
1294 34 : list_free(estate->es_insert_pending_modifytables);
1295 34 : estate->es_insert_pending_result_relations = NIL;
1296 34 : estate->es_insert_pending_modifytables = NIL;
1297 34 : }
1298 :
1299 : /*
1300 : * ExecDeletePrologue -- subroutine for ExecDelete
1301 : *
1302 : * Prepare executor state for DELETE. Actually, the only thing we have to do
1303 : * here is execute BEFORE ROW triggers. We return false if one of them makes
1304 : * the delete a no-op; otherwise, return true.
1305 : */
1306 : static bool
1307 1526168 : ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1308 : ItemPointer tupleid, HeapTuple oldtuple,
1309 : TupleTableSlot **epqreturnslot, TM_Result *result)
1310 : {
1311 1526168 : if (result)
1312 1346 : *result = TM_Ok;
1313 :
1314 : /* BEFORE ROW DELETE triggers */
1315 1526168 : if (resultRelInfo->ri_TrigDesc &&
1316 7096 : resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1317 : {
1318 : /* Flush any pending inserts, so rows are visible to the triggers */
1319 388 : if (context->estate->es_insert_pending_result_relations != NIL)
1320 2 : ExecPendingInserts(context->estate);
1321 :
1322 388 : return ExecBRDeleteTriggers(context->estate, context->epqstate,
1323 : resultRelInfo, tupleid, oldtuple,
1324 : epqreturnslot, result, &context->tmfd);
1325 : }
1326 :
1327 1525780 : return true;
1328 : }
1329 :
1330 : /*
1331 : * ExecDeleteAct -- subroutine for ExecDelete
1332 : *
1333 : * Actually delete the tuple from a plain table.
1334 : *
1335 : * Caller is in charge of doing EvalPlanQual as necessary
1336 : */
1337 : static TM_Result
1338 1525980 : ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1339 : ItemPointer tupleid, bool changingPart)
1340 : {
1341 1525980 : EState *estate = context->estate;
1342 :
1343 1525980 : return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1344 : estate->es_output_cid,
1345 : estate->es_snapshot,
1346 : estate->es_crosscheck_snapshot,
1347 : true /* wait for commit */ ,
1348 : &context->tmfd,
1349 : changingPart);
1350 : }
1351 :
1352 : /*
1353 : * ExecDeleteEpilogue -- subroutine for ExecDelete
1354 : *
1355 : * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1356 : * including the UPDATE triggers if the deletion is being done as part of a
1357 : * cross-partition tuple move.
1358 : */
1359 : static void
1360 1525920 : ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1361 : ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1362 : {
1363 1525920 : ModifyTableState *mtstate = context->mtstate;
1364 1525920 : EState *estate = context->estate;
1365 : TransitionCaptureState *ar_delete_trig_tcs;
1366 :
1367 : /*
1368 : * If this delete is the result of a partition key update that moved the
1369 : * tuple to a new partition, put this row into the transition OLD TABLE,
1370 : * if there is one. We need to do this separately for DELETE and INSERT
1371 : * because they happen on different tables.
1372 : */
1373 1525920 : ar_delete_trig_tcs = mtstate->mt_transition_capture;
1374 1525920 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1375 42 : mtstate->mt_transition_capture->tcs_update_old_table)
1376 : {
1377 42 : ExecARUpdateTriggers(estate, resultRelInfo,
1378 : NULL, NULL,
1379 : tupleid, oldtuple,
1380 42 : NULL, NULL, mtstate->mt_transition_capture,
1381 : false);
1382 :
1383 : /*
1384 : * We've already captured the OLD TABLE row, so make sure any AR
1385 : * DELETE trigger fired below doesn't capture it again.
1386 : */
1387 42 : ar_delete_trig_tcs = NULL;
1388 : }
1389 :
1390 : /* AFTER ROW DELETE Triggers */
1391 1525920 : ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1392 : ar_delete_trig_tcs, changingPart);
1393 1525920 : }
1394 :
1395 : /* ----------------------------------------------------------------
1396 : * ExecDelete
1397 : *
1398 : * DELETE is like UPDATE, except that we delete the tuple and no
1399 : * index modifications are needed.
1400 : *
1401 : * When deleting from a table, tupleid identifies the tuple to
1402 : * delete and oldtuple is NULL. When deleting from a view,
1403 : * oldtuple is passed to the INSTEAD OF triggers and identifies
1404 : * what to delete, and tupleid is invalid. When deleting from a
1405 : * foreign table, tupleid is invalid; the FDW has to figure out
1406 : * which row to delete using data from the planSlot. oldtuple is
1407 : * passed to foreign table triggers; it is NULL when the foreign
1408 : * table has no relevant triggers. We use tupleDeleted to indicate
1409 : * whether the tuple is actually deleted, callers can use it to
1410 : * decide whether to continue the operation. When this DELETE is a
1411 : * part of an UPDATE of partition-key, then the slot returned by
1412 : * EvalPlanQual() is passed back using output parameter epqreturnslot.
1413 : *
1414 : * Returns RETURNING result if any, otherwise NULL.
1415 : * ----------------------------------------------------------------
1416 : */
static TupleTableSlot *
ExecDelete(ModifyTableContext *context,
		   ResultRelInfo *resultRelInfo,
		   ItemPointer tupleid,
		   HeapTuple oldtuple,
		   bool processReturning,
		   bool changingPart,
		   bool canSetTag,
		   TM_Result *tmresult,
		   bool *tupleDeleted,
		   TupleTableSlot **epqreturnslot)
{
	EState	   *estate = context->estate;
	Relation	resultRelationDesc = resultRelInfo->ri_RelationDesc;
	TupleTableSlot *slot = NULL;
	TM_Result	result;

	/* Default the "did we delete" report to false until proven otherwise */
	if (tupleDeleted)
		*tupleDeleted = false;

	/*
	 * Prepare for the delete.  This includes BEFORE ROW triggers, so we're
	 * done if it says we are.
	 */
	if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
							epqreturnslot, tmresult))
		return NULL;

	/* INSTEAD OF ROW DELETE Triggers */
	if (resultRelInfo->ri_TrigDesc &&
		resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
	{
		bool		dodelete;

		Assert(oldtuple != NULL);
		dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);

		if (!dodelete)			/* "do nothing" */
			return NULL;
	}
	else if (resultRelInfo->ri_FdwRoutine)
	{
		/*
		 * delete from foreign table: let the FDW do it
		 *
		 * We offer the returning slot as a place to store RETURNING data,
		 * although the FDW can return some other slot if it wants.
		 */
		slot = ExecGetReturningSlot(estate, resultRelInfo);
		slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
															   resultRelInfo,
															   slot,
															   context->planSlot);

		if (slot == NULL)		/* "do nothing" */
			return NULL;

		/*
		 * RETURNING expressions might reference the tableoid column, so
		 * (re)initialize tts_tableOid before evaluating them.
		 */
		if (TTS_EMPTY(slot))
			ExecStoreAllNullTuple(slot);

		slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
	}
	else
	{
		/*
		 * delete the tuple
		 *
		 * Note: if context->estate->es_crosscheck_snapshot isn't
		 * InvalidSnapshot, we check that the row to be deleted is visible to
		 * that snapshot, and throw a can't-serialize error if not. This is a
		 * special-case behavior needed for referential integrity updates in
		 * transaction-snapshot mode transactions.
		 */
ldelete:
		result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);

		/* Report the table AM's verdict to the caller (e.g. MERGE) if asked */
		if (tmresult)
			*tmresult = result;

		switch (result)
		{
			case TM_SelfModified:

				/*
				 * The target tuple was already updated or deleted by the
				 * current command, or by a later command in the current
				 * transaction. The former case is possible in a join DELETE
				 * where multiple tuples join to the same target tuple. This
				 * is somewhat questionable, but Postgres has always allowed
				 * it: we just ignore additional deletion attempts.
				 *
				 * The latter case arises if the tuple is modified by a
				 * command in a BEFORE trigger, or perhaps by a command in a
				 * volatile function used in the query. In such situations we
				 * should not ignore the deletion, but it is equally unsafe to
				 * proceed. We don't want to discard the original DELETE
				 * while keeping the triggered actions based on its deletion;
				 * and it would be no better to allow the original DELETE
				 * while discarding updates that it triggered. The row update
				 * carries some information that might be important according
				 * to business rules; so throwing an error is the only safe
				 * course.
				 *
				 * If a trigger actually intends this type of interaction, it
				 * can re-execute the DELETE and then return NULL to cancel
				 * the outer delete.
				 */
				if (context->tmfd.cmax != estate->es_output_cid)
					ereport(ERROR,
							(errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
							 errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
							 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));

				/* Else, already deleted by self; nothing to do */
				return NULL;

			case TM_Ok:
				break;

			case TM_Updated:
				{
					TupleTableSlot *inputslot;
					TupleTableSlot *epqslot;

					/* Concurrent update is a hard error under REPEATABLE READ+ */
					if (IsolationUsesXactSnapshot())
						ereport(ERROR,
								(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
								 errmsg("could not serialize access due to concurrent update")));

					/*
					 * Already know that we're going to need to do EPQ, so
					 * fetch tuple directly into the right slot.
					 */
					EvalPlanQualBegin(context->epqstate);
					inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
												 resultRelInfo->ri_RangeTableIndex);

					/* Lock the newest tuple version, following the update chain */
					result = table_tuple_lock(resultRelationDesc, tupleid,
											  estate->es_snapshot,
											  inputslot, estate->es_output_cid,
											  LockTupleExclusive, LockWaitBlock,
											  TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
											  &context->tmfd);

					switch (result)
					{
						case TM_Ok:
							Assert(context->tmfd.traversed);
							epqslot = EvalPlanQual(context->epqstate,
												   resultRelationDesc,
												   resultRelInfo->ri_RangeTableIndex,
												   inputslot);
							if (TupIsNull(epqslot))
								/* Tuple not passing quals anymore, exiting... */
								return NULL;

							/*
							 * If requested, skip delete and pass back the
							 * updated row.
							 */
							if (epqreturnslot)
							{
								*epqreturnslot = epqslot;
								return NULL;
							}
							else
								goto ldelete;

						case TM_SelfModified:

							/*
							 * This can be reached when following an update
							 * chain from a tuple updated by another session,
							 * reaching a tuple that was already updated in
							 * this transaction. If previously updated by this
							 * command, ignore the delete, otherwise error
							 * out.
							 *
							 * See also TM_SelfModified response to
							 * table_tuple_delete() above.
							 */
							if (context->tmfd.cmax != estate->es_output_cid)
								ereport(ERROR,
										(errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
										 errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
										 errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
							return NULL;

						case TM_Deleted:
							/* tuple already deleted; nothing to do */
							return NULL;

						default:

							/*
							 * TM_Invisible should be impossible because we're
							 * waiting for updated row versions, and would
							 * already have errored out if the first version
							 * is invisible.
							 *
							 * TM_Updated should be impossible, because we're
							 * locking the latest version via
							 * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
							 */
							elog(ERROR, "unexpected table_tuple_lock status: %u",
								 result);
							return NULL;
					}

					Assert(false);
					break;
				}

			case TM_Deleted:
				if (IsolationUsesXactSnapshot())
					ereport(ERROR,
							(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
							 errmsg("could not serialize access due to concurrent delete")));
				/* tuple already deleted; nothing to do */
				return NULL;

			default:
				elog(ERROR, "unrecognized table_tuple_delete status: %u",
					 result);
				return NULL;
		}

		/*
		 * Note: Normally one would think that we have to delete index tuples
		 * associated with the heap tuple now...
		 *
		 * ... but in POSTGRES, we have no need to do this because VACUUM will
		 * take care of it later. We can't delete index tuples immediately
		 * anyway, since the tuple is still visible to other transactions.
		 */
	}

	if (canSetTag)
		(estate->es_processed)++;

	/* Tell caller that the delete actually happened. */
	if (tupleDeleted)
		*tupleDeleted = true;

	/* Fire AFTER ROW triggers and handle transition-table capture */
	ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);

	/* Process RETURNING if present and if requested */
	if (processReturning && resultRelInfo->ri_projectReturning)
	{
		/*
		 * We have to put the target tuple into a slot, which means first we
		 * gotta fetch it. We can use the trigger tuple slot.
		 */
		TupleTableSlot *rslot;

		if (resultRelInfo->ri_FdwRoutine)
		{
			/* FDW must have provided a slot containing the deleted row */
			Assert(!TupIsNull(slot));
		}
		else
		{
			slot = ExecGetReturningSlot(estate, resultRelInfo);
			if (oldtuple != NULL)
			{
				/* View/foreign-trigger path: the caller handed us the row */
				ExecForceStoreHeapTuple(oldtuple, slot, false);
			}
			else
			{
				/* Re-fetch the just-deleted row; SnapshotAny can still see it */
				if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
												   SnapshotAny, slot))
					elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
			}
		}

		rslot = ExecProcessReturning(resultRelInfo, slot, context->planSlot);

		/*
		 * Before releasing the target tuple again, make sure rslot has a
		 * local copy of any pass-by-reference values.
		 */
		ExecMaterializeSlot(rslot);

		ExecClearTuple(slot);

		return rslot;
	}

	return NULL;
}
1711 :
1712 : /*
1713 :  * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
1714 :  *
1715 :  * This works by first deleting the old tuple from the current partition,
1716 :  * followed by inserting the new tuple into the root parent table, that is,
1717 :  * mtstate->rootResultRelInfo. It will be re-routed from there to the
1718 :  * correct partition.
1719 :  *
1720 :  * Returns true if the tuple has been successfully moved, or if it's found
1721 :  * that the tuple was concurrently deleted so there's nothing more to do
1722 :  * for the caller.
1723 :  *
1724 :  * False is returned if the tuple we're trying to move is found to have been
1725 :  * concurrently updated. In that case, the caller must check if the updated
1726 :  * tuple that's returned in *retry_slot still needs to be re-routed, and call
1727 :  * this function again or perform a regular update accordingly. For MERGE,
1728 :  * the updated tuple is not returned in *retry_slot; it has its own retry
1729 :  * logic.
     :  *
     :  * On success, *inserted_tuple and *insert_destrel (where the caller passed
     :  * non-NULL pointers) receive the tuple inserted into the destination
     :  * partition and that partition's ResultRelInfo; any RETURNING projection
     :  * computed by the INSERT is left in context->cpUpdateReturningSlot.
1730 :  */
1731 : static bool
1732 1012 : ExecCrossPartitionUpdate(ModifyTableContext *context,
1733 : ResultRelInfo *resultRelInfo,
1734 : ItemPointer tupleid, HeapTuple oldtuple,
1735 : TupleTableSlot *slot,
1736 : bool canSetTag,
1737 : UpdateContext *updateCxt,
1738 : TM_Result *tmresult,
1739 : TupleTableSlot **retry_slot,
1740 : TupleTableSlot **inserted_tuple,
1741 : ResultRelInfo **insert_destrel)
1742 : {
1743 1012 : ModifyTableState *mtstate = context->mtstate;
1744 1012 : EState *estate = mtstate->ps.state;
1745 : TupleConversionMap *tupconv_map;
1746 : bool tuple_deleted;
1747 1012 : TupleTableSlot *epqslot = NULL;
1748 :
1749 1012 : context->cpUpdateReturningSlot = NULL;
1750 1012 : *retry_slot = NULL;
1751 :
1752 : /*
1753 : * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
1754 : * to migrate to a different partition. Maybe this can be implemented
1755 : * some day, but it seems a fringe feature with little redeeming value.
1756 : */
1757 1012 : if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
1758 0 : ereport(ERROR,
1759 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1760 : errmsg("invalid ON UPDATE specification"),
1761 : errdetail("The result tuple would appear in a different partition than the original tuple.")));
1762 :
1763 : /*
1764 : * When an UPDATE is run directly on a leaf partition, simply fail with a
1765 : * partition constraint violation error.
1766 : */
1767 1012 : if (resultRelInfo == mtstate->rootResultRelInfo)
1768 42 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1769 :
1770 : /* Initialize tuple routing info if not already done. */
1771 970 : if (mtstate->mt_partition_tuple_routing == NULL)
1772 : {
1773 602 : Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
1774 : MemoryContext oldcxt;
1775 :
1776 : /* Things built here have to last for the query duration. */
1777 602 : oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1778 :
1779 602 : mtstate->mt_partition_tuple_routing =
1780 602 : ExecSetupPartitionTupleRouting(estate, rootRel);
1781 :
1782 : /*
1783 : * Before a partition's tuple can be re-routed, it must first be
1784 : * converted to the root's format, so we'll need a slot for storing
1785 : * such tuples.
1786 : */
1787 : Assert(mtstate->mt_root_tuple_slot == NULL);
1788 602 : mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
1789 :
1790 602 : MemoryContextSwitchTo(oldcxt);
1791 : }
1792 :
1793 : /*
1794 : * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
1795 : * We want to return rows from INSERT.
1796 : */
1797 970 : ExecDelete(context, resultRelInfo,
1798 : tupleid, oldtuple,
1799 : false, /* processReturning */
1800 : true, /* changingPart */
1801 : false, /* canSetTag */
1802 : tmresult, &tuple_deleted, &epqslot);
1803 :
1804 : /*
1805 : * For some reason if DELETE didn't happen (e.g. trigger prevented it, or
1806 : * it was already deleted by self, or it was concurrently deleted by
1807 : * another transaction), then we should skip the insert as well;
1808 : * otherwise, an UPDATE could cause an increase in the total number of
1809 : * rows across all partitions, which is clearly wrong.
1810 : *
1811 : * For a normal UPDATE, the case where the tuple has been the subject of a
1812 : * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
1813 : * machinery, but for an UPDATE that we've translated into a DELETE from
1814 : * this partition and an INSERT into some other partition, that's not
1815 : * available, because CTID chains can't span relation boundaries. We
1816 : * mimic the semantics to a limited extent by skipping the INSERT if the
1817 : * DELETE fails to find a tuple. This ensures that two concurrent
1818 : * attempts to UPDATE the same tuple at the same time can't turn one tuple
1819 : * into two, and that an UPDATE of a just-deleted tuple can't resurrect
1820 : * it.
1821 : */
1822 968 : if (!tuple_deleted)
1823 : {
1824 : /*
1825 : * epqslot will be typically NULL. But when ExecDelete() finds that
1826 : * another transaction has concurrently updated the same row, it
1827 : * re-fetches the row, skips the delete, and epqslot is set to the
1828 : * re-fetched tuple slot. In that case, we need to do all the checks
1829 : * again. For MERGE, we leave everything to the caller (it must do
1830 : * additional rechecking, and might end up executing a different
1831 : * action entirely).
1832 : */
1833 76 : if (mtstate->operation == CMD_MERGE)
     : /* *tmresult from ExecDelete tells MERGE whether the row is gone */
1834 34 : return *tmresult == TM_Ok;
1835 42 : else if (TupIsNull(epqslot))
1836 36 : return true;
1837 : else
1838 : {
1839 : /* Fetch the most recent version of old tuple. */
1840 : TupleTableSlot *oldSlot;
1841 :
1842 : /* ... but first, make sure ri_oldTupleSlot is initialized. */
1843 6 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
1844 0 : ExecInitUpdateProjection(mtstate, resultRelInfo);
1845 6 : oldSlot = resultRelInfo->ri_oldTupleSlot;
1846 6 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
1847 : tupleid,
1848 : SnapshotAny,
1849 : oldSlot))
1850 0 : elog(ERROR, "failed to fetch tuple being updated");
1851 : /* and project the new tuple to retry the UPDATE with */
1852 6 : *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
1853 : oldSlot);
1854 6 : return false;
1855 : }
1856 : }
1857 :
1858 : /*
1859 : * resultRelInfo is one of the per-relation resultRelInfos. So we should
1860 : * convert the tuple into root's tuple descriptor if needed, since
1861 : * ExecInsert() starts the search from root.
1862 : */
1863 892 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1864 892 : if (tupconv_map != NULL)
1865 302 : slot = execute_attr_map_slot(tupconv_map->attrMap,
1866 : slot,
1867 : mtstate->mt_root_tuple_slot);
1868 :
1869 : /* Tuple routing starts from the root table. */
1870 764 : context->cpUpdateReturningSlot =
1871 892 : ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
1872 : inserted_tuple, insert_destrel);
1873 :
1874 : /*
1875 : * Reset the transition state that may possibly have been written by
1876 : * INSERT.
1877 : */
1878 764 : if (mtstate->mt_transition_capture)
1879 42 : mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
1880 :
1881 : /* We're done moving. */
1882 764 : return true;
1883 : }
1884 :
1885 : /*
1886 :  * ExecUpdatePrologue -- subroutine for ExecUpdate
1887 :  *
1888 :  * Prepare executor state for UPDATE. This includes running BEFORE ROW
1889 :  * triggers. We return false if one of them makes the update a no-op;
1890 :  * otherwise, return true.
     :  *
     :  * result is an optional output: callers that need the tuple-lock status
     :  * from the BEFORE ROW trigger path pass a pointer, others pass NULL.
1891 :  */
1892 : static bool
1893 314258 : ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1894 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
1895 : TM_Result *result)
1896 : {
1897 314258 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1898 :
1899 314258 : if (result)
1900 2106 : *result = TM_Ok;
1901 :
     : /* materialize so the slot owns its tuple before triggers can run */
1902 314258 : ExecMaterializeSlot(slot);
1903 :
1904 : /*
1905 : * Open the table's indexes, if we have not done so already, so that we
1906 : * can add new index entries for the updated tuple.
1907 : */
1908 314258 : if (resultRelationDesc->rd_rel->relhasindex &&
1909 223768 : resultRelInfo->ri_IndexRelationDescs == NULL)
1910 8478 : ExecOpenIndices(resultRelInfo, false);
1911 :
1912 : /* BEFORE ROW UPDATE triggers */
1913 314258 : if (resultRelInfo->ri_TrigDesc &&
1914 6154 : resultRelInfo->ri_TrigDesc->trig_update_before_row)
1915 : {
1916 : /* Flush any pending inserts, so rows are visible to the triggers */
1917 2572 : if (context->estate->es_insert_pending_result_relations != NIL)
1918 2 : ExecPendingInserts(context->estate);
1919 :
1920 2572 : return ExecBRUpdateTriggers(context->estate, context->epqstate,
1921 : resultRelInfo, tupleid, oldtuple, slot,
1922 : result, &context->tmfd);
1923 : }
1924 :
1925 311686 : return true;
1926 : }
1927 :
1928 : /*
1929 :  * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
1930 :  *
1931 :  * Apply the final modifications to the tuple slot before the update.
1932 :  * (This is split out because we also need it in the foreign-table code path.)
     :  *
     :  * slot carries the proposed new tuple; it is modified in place.
1933 :  */
1934 : static void
1935 313978 : ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
1936 : TupleTableSlot *slot,
1937 : EState *estate)
1938 : {
1939 313978 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1940 :
1941 : /*
1942 : * Constraints and GENERATED expressions might reference the tableoid
1943 : * column, so (re-)initialize tts_tableOid before evaluating them.
1944 : */
1945 313978 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1946 :
1947 : /*
1948 : * Compute stored generated columns
1949 : */
1950 313978 : if (resultRelationDesc->rd_att->constr &&
1951 186910 : resultRelationDesc->rd_att->constr->has_generated_stored)
1952 260 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
1953 : CMD_UPDATE);
1954 313978 : }
1955 :
1956 : /*
1957 :  * ExecUpdateAct -- subroutine for ExecUpdate
1958 :  *
1959 :  * Actually update the tuple, when operating on a plain table. If the
1960 :  * table is a partition, and the command was called referencing an ancestor
1961 :  * partitioned table, this routine migrates the resulting tuple to another
1962 :  * partition.
1963 :  *
1964 :  * The caller is in charge of keeping indexes current as necessary. The
1965 :  * caller is also in charge of doing EvalPlanQual if the tuple is found to
1966 :  * be concurrently updated. However, in case of a cross-partition update,
1967 :  * this routine does it.
     :  *
     :  * Returns TM_Ok on success, including a completed cross-partition move
     :  * (signalled via updateCxt->crossPartUpdate); otherwise the TM_Result
     :  * from table_tuple_update (or, for MERGE, from the cross-partition
     :  * DELETE attempt) for the caller to handle.
1968 :  */
1969 : static TM_Result
1970 313830 : ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1971 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
1972 : bool canSetTag, UpdateContext *updateCxt)
1973 : {
1974 313830 : EState *estate = context->estate;
1975 313830 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1976 : bool partition_constraint_failed;
1977 : TM_Result result;
1978 :
1979 313830 : updateCxt->crossPartUpdate = false;
1980 :
1981 : /*
1982 : * If we move the tuple to a new partition, we loop back here to recompute
1983 : * GENERATED values (which are allowed to be different across partitions)
1984 : * and recheck any RLS policies and constraints. We do not fire any
1985 : * BEFORE triggers of the new partition, however.
1986 : */
1987 313836 : lreplace:
1988 : /* Fill in GENERATEd columns */
1989 313836 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
1990 :
1991 : /* ensure slot is independent, consider e.g. EPQ */
1992 313836 : ExecMaterializeSlot(slot);
1993 :
1994 : /*
1995 : * If partition constraint fails, this row might get moved to another
1996 : * partition, in which case we should check the RLS CHECK policy just
1997 : * before inserting into the new partition, rather than doing it here.
1998 : * This is because a trigger on that partition might again change the row.
1999 : * So skip the WCO checks if the partition constraint fails.
2000 : */
2001 313836 : partition_constraint_failed =
2002 316458 : resultRelationDesc->rd_rel->relispartition &&
2003 2622 : !ExecPartitionCheck(resultRelInfo, slot, estate, false);
2004 :
2005 : /* Check any RLS UPDATE WITH CHECK policies */
2006 313836 : if (!partition_constraint_failed &&
2007 312824 : resultRelInfo->ri_WithCheckOptions != NIL)
2008 : {
2009 : /*
2010 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
2011 : * we are looking for at this point.
2012 : */
2013 480 : ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
2014 : resultRelInfo, slot, estate);
2015 : }
2016 :
2017 : /*
2018 : * If a partition check failed, try to move the row into the right
2019 : * partition.
2020 : */
2021 313782 : if (partition_constraint_failed)
2022 : {
2023 : TupleTableSlot *inserted_tuple,
2024 : *retry_slot;
2025 1012 : ResultRelInfo *insert_destrel = NULL;
2026 :
2027 : /*
2028 : * ExecCrossPartitionUpdate will first DELETE the row from the
2029 : * partition it's currently in and then insert it back into the root
2030 : * table, which will re-route it to the correct partition. However,
2031 : * if the tuple has been concurrently updated, a retry is needed.
2032 : */
2033 1012 : if (ExecCrossPartitionUpdate(context, resultRelInfo,
2034 : tupleid, oldtuple, slot,
2035 : canSetTag, updateCxt,
2036 : &result,
2037 : &retry_slot,
2038 : &inserted_tuple,
2039 : &insert_destrel))
2040 : {
2041 : /* success! */
2042 824 : updateCxt->crossPartUpdate = true;
2043 :
2044 : /*
2045 : * If the partitioned table being updated is referenced in foreign
2046 : * keys, queue up trigger events to check that none of them were
2047 : * violated. No special treatment is needed in
2048 : * non-cross-partition update situations, because the leaf
2049 : * partition's AR update triggers will take care of that. During
2050 : * cross-partition updates implemented as delete on the source
2051 : * partition followed by insert on the destination partition,
2052 : * AR-UPDATE triggers of the root table (that is, the table
2053 : * mentioned in the query) must be fired.
2054 : *
2055 : * NULL insert_destrel means that the move failed to occur, that
2056 : * is, the update failed, so no need to anything in that case.
2057 : */
2058 824 : if (insert_destrel &&
2059 736 : resultRelInfo->ri_TrigDesc &&
2060 338 : resultRelInfo->ri_TrigDesc->trig_update_after_row)
2061 276 : ExecCrossPartitionUpdateForeignKey(context,
2062 : resultRelInfo,
2063 : insert_destrel,
2064 : tupleid, slot,
2065 : inserted_tuple);
2066 :
2067 828 : return TM_Ok;
2068 : }
2069 :
2070 : /*
2071 : * No luck, a retry is needed. If running MERGE, we do not do so
2072 : * here; instead let it handle that on its own rules.
2073 : */
2074 16 : if (context->mtstate->operation == CMD_MERGE)
2075 10 : return result;
2076 :
2077 : /*
2078 : * ExecCrossPartitionUpdate installed an updated version of the new
2079 : * tuple in the retry slot; start over.
2080 : */
2081 6 : slot = retry_slot;
2082 6 : goto lreplace;
2083 : }
2084 :
2085 : /*
2086 : * Check the constraints of the tuple. We've already checked the
2087 : * partition constraint above; however, we must still ensure the tuple
2088 : * passes all other constraints, so we will call ExecConstraints() and
2089 : * have it validate all remaining checks.
2090 : */
2091 312770 : if (resultRelationDesc->rd_att->constr)
2092 186362 : ExecConstraints(resultRelInfo, slot, estate);
2093 :
2094 : /*
2095 : * replace the heap tuple
2096 : *
2097 : * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
2098 : * the row to be updated is visible to that snapshot, and throw a
2099 : * can't-serialize error if not. This is a special-case behavior needed
2100 : * for referential integrity updates in transaction-snapshot mode
2101 : * transactions.
2102 : */
2103 312720 : result = table_tuple_update(resultRelationDesc, tupleid, slot,
2104 : estate->es_output_cid,
2105 : estate->es_snapshot,
2106 : estate->es_crosscheck_snapshot,
2107 : true /* wait for commit */ ,
2108 : &context->tmfd, &updateCxt->lockmode,
2109 : &updateCxt->updateIndexes);
2110 :
2111 312696 : return result;
2112 : }
2113 :
2114 : /*
2115 :  * ExecUpdateEpilogue -- subroutine for ExecUpdate
2116 :  *
2117 :  * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2118 :  * returns indicating that the tuple was updated.
2119 :  */
2120 : static void
2121 312702 : ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
2122 : ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2123 : HeapTuple oldtuple, TupleTableSlot *slot)
2124 : {
2125 312702 : ModifyTableState *mtstate = context->mtstate;
2126 312702 : List *recheckIndexes = NIL;
2127 :
2128 : /* insert index entries for tuple if necessary */
     : /* updateIndexes (set by table_tuple_update) says whether/which index
     :  * entries need refreshing; TU_None means a HOT-style update needing
     :  * none -- presumably; confirm against tableam docs. */
2129 312702 : if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
2130 169562 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
2131 : slot, context->estate,
2132 : true, false,
2133 : NULL, NIL,
2134 169562 : (updateCxt->updateIndexes == TU_Summarizing));
2135 :
2136 : /* AFTER ROW UPDATE Triggers */
     : /* NOTE(review): operation == CMD_INSERT here looks like the ON CONFLICT
     :  * DO UPDATE path, hence the separate transition capture -- confirm. */
2137 312678 : ExecARUpdateTriggers(context->estate, resultRelInfo,
2138 : NULL, NULL,
2139 : tupleid, oldtuple, slot,
2140 : recheckIndexes,
2141 312678 : mtstate->operation == CMD_INSERT ?
2142 : mtstate->mt_oc_transition_capture :
2143 : mtstate->mt_transition_capture,
2144 : false);
2145 :
2146 312678 : list_free(recheckIndexes);
2147 :
2148 : /*
2149 : * Check any WITH CHECK OPTION constraints from parent views. We are
2150 : * required to do this after testing all constraints and uniqueness
2151 : * violations per the SQL spec, so we do it after actually updating the
2152 : * record in the heap and all indexes.
2153 : *
2154 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2155 : * are looking for at this point.
2156 : */
2157 312678 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2158 454 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2159 : slot, context->estate);
2160 312602 : }
2161 :
2162 : /*
2163 :  * Queues up an update event using the target root partitioned table's
2164 :  * trigger to check that a cross-partition update hasn't broken any foreign
2165 :  * keys pointing into it.
     :  *
     :  * Errors out if a foreign key references a non-root ancestor of the source
     :  * partition, since such constraints cannot be enforced across the move.
2166 :  */
2167 : static void
2168 276 : ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
2169 : ResultRelInfo *sourcePartInfo,
2170 : ResultRelInfo *destPartInfo,
2171 : ItemPointer tupleid,
2172 : TupleTableSlot *oldslot,
2173 : TupleTableSlot *newslot)
2174 : {
2175 : ListCell *lc;
2176 : ResultRelInfo *rootRelInfo;
2177 : List *ancestorRels;
2178 :
2179 276 : rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2180 276 : ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2181 :
2182 : /*
2183 : * For any foreign keys that point directly into a non-root ancestors of
2184 : * the source partition, we can in theory fire an update event to enforce
2185 : * those constraints using their triggers, if we could tell that both the
2186 : * source and the destination partitions are under the same ancestor. But
2187 : * for now, we simply report an error that those cannot be enforced.
2188 : */
2189 606 : foreach(lc, ancestorRels)
2190 : {
2191 336 : ResultRelInfo *rInfo = lfirst(lc);
2192 336 : TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2193 336 : bool has_noncloned_fkey = false;
2194 :
2195 : /* Root ancestor's triggers will be processed. */
2196 336 : if (rInfo == rootRelInfo)
2197 270 : continue;
2198 :
2199 66 : if (trigdesc && trigdesc->trig_update_after_row)
2200 : {
     : /* scan for an FK trigger defined directly on this ancestor
     :  * (not cloned down from a parent) */
2201 228 : for (int i = 0; i < trigdesc->numtriggers; i++)
2202 : {
2203 168 : Trigger *trig = &trigdesc->triggers[i];
2204 :
2205 174 : if (!trig->tgisclone &&
2206 6 : RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
2207 : {
2208 6 : has_noncloned_fkey = true;
2209 6 : break;
2210 : }
2211 : }
2212 : }
2213 :
2214 66 : if (has_noncloned_fkey)
2215 6 : ereport(ERROR,
2216 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2217 : errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2218 : errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2219 : RelationGetRelationName(rInfo->ri_RelationDesc),
2220 : RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
2221 : errhint("Consider defining the foreign key on table \"%s\".",
2222 : RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
2223 : }
2224 :
2225 : /* Perform the root table's triggers. */
     : /* final 'true' presumably flags this as a cross-partition update to
     :  * ExecARUpdateTriggers -- confirm against trigger.c */
2226 270 : ExecARUpdateTriggers(context->estate,
2227 : rootRelInfo, sourcePartInfo, destPartInfo,
2228 : tupleid, NULL, newslot, NIL, NULL, true);
2229 270 : }
2230 :
2231 : /* ----------------------------------------------------------------
2232 :  * ExecUpdate
2233 :  *
2234 :  * note: we can't run UPDATE queries with transactions
2235 :  * off because UPDATEs are actually INSERTs and our
2236 :  * scan will mistakenly loop forever, updating the tuple
2237 :  * it just inserted.. This should be fixed but until it
2238 :  * is, we don't want to get stuck in an infinite loop
2239 :  * which corrupts your database..
2240 :  *
2241 :  * When updating a table, tupleid identifies the tuple to
2242 :  * update and oldtuple is NULL. When updating a view, oldtuple
2243 :  * is passed to the INSTEAD OF triggers and identifies what to
2244 :  * update, and tupleid is invalid. When updating a foreign table,
2245 :  * tupleid is invalid; the FDW has to figure out which row to
2246 :  * update using data from the planSlot. oldtuple is passed to
2247 :  * foreign table triggers; it is NULL when the foreign table has
2248 :  * no relevant triggers.
2249 :  *
2250 :  * slot contains the new tuple value to be stored.
2251 :  * planSlot is the output of the ModifyTable's subplan; we use it
2252 :  * to access values from other input tables (for RETURNING),
2253 :  * row-ID junk columns, etc.
2254 :  *
2255 :  * Returns RETURNING result if any, otherwise NULL.
2256 :  * ----------------------------------------------------------------
2257 :  */
2258 : static TupleTableSlot *
2259 312152 : ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2260 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2261 : bool canSetTag)
2262 : {
2263 312152 : EState *estate = context->estate;
2264 312152 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2265 312152 : UpdateContext updateCxt = {0};
2266 : TM_Result result;
2267 :
2268 : /*
2269 : * abort the operation if not running transactions
2270 : */
2271 312152 : if (IsBootstrapProcessingMode())
2272 0 : elog(ERROR, "cannot UPDATE during bootstrap");
2273 :
2274 : /*
2275 : * Prepare for the update. This includes BEFORE ROW triggers, so we're
2276 : * done if it says we are.
2277 : */
2278 312152 : if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
2279 138 : return NULL;
2280 :
2281 : /* INSTEAD OF ROW UPDATE Triggers */
2282 311978 : if (resultRelInfo->ri_TrigDesc &&
2283 5628 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2284 : {
2285 114 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2286 : oldtuple, slot))
2287 18 : return NULL; /* "do nothing" */
2288 : }
2289 311864 : else if (resultRelInfo->ri_FdwRoutine)
2290 : {
2291 : /* Fill in GENERATEd columns */
2292 142 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2293 :
2294 : /*
2295 : * update in foreign table: let the FDW do it
2296 : */
2297 142 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2298 : resultRelInfo,
2299 : slot,
2300 : context->planSlot);
2301 :
2302 142 : if (slot == NULL) /* "do nothing" */
2303 2 : return NULL;
2304 :
2305 : /*
2306 : * AFTER ROW Triggers or RETURNING expressions might reference the
2307 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
2308 : * them. (This covers the case where the FDW replaced the slot.)
2309 : */
2310 140 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2311 : }
2312 : else
2313 : {
2314 : /*
2315 : * If we generate a new candidate tuple after EvalPlanQual testing, we
2316 : * must loop back here to try again. (We don't need to redo triggers,
2317 : * however. If there are any BEFORE triggers then trigger.c will have
2318 : * done table_tuple_lock to lock the correct tuple, so there's no need
2319 : * to do them again.)
2320 : */
2321 311722 : redo_act:
2322 311820 : result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2323 : canSetTag, &updateCxt);
2324 :
2325 : /*
2326 : * If ExecUpdateAct reports that a cross-partition update was done,
2327 : * then the RETURNING tuple (if any) has been projected and there's
2328 : * nothing else for us to do.
2329 : */
2330 311534 : if (updateCxt.crossPartUpdate)
2331 684 : return context->cpUpdateReturningSlot;
2332 :
2333 310850 : switch (result)
2334 : {
2335 84 : case TM_SelfModified:
2336 :
2337 : /*
2338 : * The target tuple was already updated or deleted by the
2339 : * current command, or by a later command in the current
2340 : * transaction. The former case is possible in a join UPDATE
2341 : * where multiple tuples join to the same target tuple. This
2342 : * is pretty questionable, but Postgres has always allowed it:
2343 : * we just execute the first update action and ignore
2344 : * additional update attempts.
2345 : *
2346 : * The latter case arises if the tuple is modified by a
2347 : * command in a BEFORE trigger, or perhaps by a command in a
2348 : * volatile function used in the query. In such situations we
2349 : * should not ignore the update, but it is equally unsafe to
2350 : * proceed. We don't want to discard the original UPDATE
2351 : * while keeping the triggered actions based on it; and we
2352 : * have no principled way to merge this update with the
2353 : * previous ones. So throwing an error is the only safe
2354 : * course.
2355 : *
2356 : * If a trigger actually intends this type of interaction, it
2357 : * can re-execute the UPDATE (assuming it can figure out how)
2358 : * and then return NULL to cancel the outer update.
2359 : */
2360 84 : if (context->tmfd.cmax != estate->es_output_cid)
2361 6 : ereport(ERROR,
2362 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2363 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2364 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2365 :
2366 : /* Else, already updated by self; nothing to do */
2367 78 : return NULL;
2368 :
2369 310608 : case TM_Ok:
2370 310608 : break;
2371 :
2372 150 : case TM_Updated:
2373 : {
2374 : TupleTableSlot *inputslot;
2375 : TupleTableSlot *epqslot;
2376 : TupleTableSlot *oldSlot;
2377 :
2378 150 : if (IsolationUsesXactSnapshot())
2379 4 : ereport(ERROR,
2380 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2381 : errmsg("could not serialize access due to concurrent update")));
2382 :
2383 : /*
2384 : * Already know that we're going to need to do EPQ, so
2385 : * fetch tuple directly into the right slot.
2386 : */
2387 146 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2388 : resultRelInfo->ri_RangeTableIndex);
2389 :
2390 146 : result = table_tuple_lock(resultRelationDesc, tupleid,
2391 : estate->es_snapshot,
2392 : inputslot, estate->es_output_cid,
2393 : updateCxt.lockmode, LockWaitBlock,
2394 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2395 : &context->tmfd);
2396 :
2397 142 : switch (result)
2398 : {
2399 132 : case TM_Ok:
2400 : Assert(context->tmfd.traversed);
2401 :
2402 132 : epqslot = EvalPlanQual(context->epqstate,
2403 : resultRelationDesc,
2404 : resultRelInfo->ri_RangeTableIndex,
2405 : inputslot);
2406 132 : if (TupIsNull(epqslot))
2407 : /* Tuple not passing quals anymore, exiting... */
2408 34 : return NULL;
2409 :
2410 : /* Make sure ri_oldTupleSlot is initialized. */
2411 98 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2412 0 : ExecInitUpdateProjection(context->mtstate,
2413 : resultRelInfo);
2414 :
2415 : /* Fetch the most recent version of old tuple. */
2416 98 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2417 98 : if (!table_tuple_fetch_row_version(resultRelationDesc,
2418 : tupleid,
2419 : SnapshotAny,
2420 : oldSlot))
2421 0 : elog(ERROR, "failed to fetch tuple being updated");
2422 98 : slot = ExecGetUpdateNewTuple(resultRelInfo,
2423 : epqslot, oldSlot);
     : /* retry the whole update with the re-projected tuple */
2424 98 : goto redo_act;
2425 :
2426 2 : case TM_Deleted:
2427 : /* tuple already deleted; nothing to do */
2428 2 : return NULL;
2429 :
2430 8 : case TM_SelfModified:
2431 :
2432 : /*
2433 : * This can be reached when following an update
2434 : * chain from a tuple updated by another session,
2435 : * reaching a tuple that was already updated in
2436 : * this transaction. If previously modified by
2437 : * this command, ignore the redundant update,
2438 : * otherwise error out.
2439 : *
2440 : * See also TM_SelfModified response to
2441 : * table_tuple_update() above.
2442 : */
2443 8 : if (context->tmfd.cmax != estate->es_output_cid)
2444 2 : ereport(ERROR,
2445 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2446 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2447 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2448 6 : return NULL;
2449 :
2450 0 : default:
2451 : /* see table_tuple_lock call in ExecDelete() */
2452 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2453 : result);
2454 : return NULL;
2455 : }
2456 : }
2457 :
2458 : break;
2459 :
2460 8 : case TM_Deleted:
2461 8 : if (IsolationUsesXactSnapshot())
2462 0 : ereport(ERROR,
2463 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2464 : errmsg("could not serialize access due to concurrent delete")));
2465 : /* tuple already deleted; nothing to do */
2466 8 : return NULL;
2467 :
2468 0 : default:
2469 0 : elog(ERROR, "unrecognized table_tuple_update status: %u",
2470 : result);
2471 : return NULL;
2472 : }
2473 : }
2474 :
     : /* canSetTag is false e.g. when this update is a secondary action;
     :  * only the primary action bumps the command's row count */
2475 310838 : if (canSetTag)
2476 310246 : (estate->es_processed)++;
2477 :
2478 310838 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2479 : slot);
2480 :
2481 : /* Process RETURNING if present */
2482 310750 : if (resultRelInfo->ri_projectReturning)
2483 2142 : return ExecProcessReturning(resultRelInfo, slot, context->planSlot);
2484 :
2485 308608 : return NULL;
2486 : }
2487 :
2488 : /*
2489 : * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
2490 : *
2491 : * Try to lock tuple for update as part of speculative insertion. If
2492 : * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
2493 : * (but still lock row, even though it may not satisfy estate's
2494 : * snapshot).
2495 : *
2496 : * Returns true if we're done (with or without an update), or false if
2497 : * the caller must retry the INSERT from scratch.
2498 : */
2499 : static bool
2500 5200 : ExecOnConflictUpdate(ModifyTableContext *context,
2501 : ResultRelInfo *resultRelInfo,
2502 : ItemPointer conflictTid,
2503 : TupleTableSlot *excludedSlot,
2504 : bool canSetTag,
2505 : TupleTableSlot **returning)
2506 : {
2507 5200 : ModifyTableState *mtstate = context->mtstate;
2508 5200 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2509 5200 : Relation relation = resultRelInfo->ri_RelationDesc;
2510 5200 : ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
2511 5200 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
2512 : TM_FailureData tmfd;
2513 : LockTupleMode lockmode;
2514 : TM_Result test;
2515 : Datum xminDatum;
2516 : TransactionId xmin;
2517 : bool isnull;
2518 :
2519 : /* Determine lock mode to use */
2520 5200 : lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
2521 :
2522 : /*
2523 : * Lock tuple for update. Don't follow updates when tuple cannot be
2524 : * locked without doing so. A row locking conflict here means our
2525 : * previous conclusion that the tuple is conclusively committed is not
2526 : * true anymore.
2527 : */
2528 5200 : test = table_tuple_lock(relation, conflictTid,
2529 5200 : context->estate->es_snapshot,
2530 5200 : existing, context->estate->es_output_cid,
2531 : lockmode, LockWaitBlock, 0,
2532 : &tmfd);
2533 5200 : switch (test)
2534 : {
2535 5176 : case TM_Ok:
2536 : /* success! */
2537 5176 : break;
2538 :
2539 24 : case TM_Invisible:
2540 :
2541 : /*
2542 : * This can occur when a just inserted tuple is updated again in
2543 : * the same command. E.g. because multiple rows with the same
2544 : * conflicting key values are inserted.
2545 : *
2546 : * This is somewhat similar to the ExecUpdate() TM_SelfModified
2547 : * case. We do not want to proceed because it would lead to the
2548 : * same row being updated a second time in some unspecified order,
2549 : * and in contrast to plain UPDATEs there's no historical behavior
2550 : * to break.
2551 : *
2552 : * It is the user's responsibility to prevent this situation from
2553 : * occurring. These problems are why the SQL standard similarly
2554 : * specifies that for SQL MERGE, an exception must be raised in
2555 : * the event of an attempt to update the same row twice.
2556 : */
2557 24 : xminDatum = slot_getsysattr(existing,
2558 : MinTransactionIdAttributeNumber,
2559 : &isnull);
2560 : Assert(!isnull);
2561 24 : xmin = DatumGetTransactionId(xminDatum);
2562 :
2563 24 : if (TransactionIdIsCurrentTransactionId(xmin))
2564 24 : ereport(ERROR,
2565 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2566 : /* translator: %s is a SQL command name */
2567 : errmsg("%s command cannot affect row a second time",
2568 : "ON CONFLICT DO UPDATE"),
2569 : errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
2570 :
2571 : /* This shouldn't happen */
2572 0 : elog(ERROR, "attempted to lock invisible tuple");
2573 : break;
2574 :
2575 0 : case TM_SelfModified:
2576 :
2577 : /*
2578 : * This state should never be reached. As a dirty snapshot is used
2579 : * to find conflicting tuples, speculative insertion wouldn't have
2580 : * seen this row to conflict with.
2581 : */
2582 0 : elog(ERROR, "unexpected self-updated tuple");
2583 : break;
2584 :
2585 0 : case TM_Updated:
2586 0 : if (IsolationUsesXactSnapshot())
2587 0 : ereport(ERROR,
2588 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2589 : errmsg("could not serialize access due to concurrent update")));
2590 :
2591 : /*
2592 : * As long as we don't support an UPDATE of INSERT ON CONFLICT for
2593 : * a partitioned table we shouldn't reach a case where the tuple to
2594 : * be locked is moved to another partition due to concurrent update
2595 : * of the partition key.
2596 : */
2597 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2598 :
2599 : /*
2600 : * Tell caller to try again from the very start.
2601 : *
2602 : * It does not make sense to use the usual EvalPlanQual() style
2603 : * loop here, as the new version of the row might not conflict
2604 : * anymore, or the conflicting tuple has actually been deleted.
2605 : */
2606 0 : ExecClearTuple(existing);
2607 0 : return false;
2608 :
2609 0 : case TM_Deleted:
2610 0 : if (IsolationUsesXactSnapshot())
2611 0 : ereport(ERROR,
2612 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2613 : errmsg("could not serialize access due to concurrent delete")));
2614 :
2615 : /* see TM_Updated case */
2616 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2617 0 : ExecClearTuple(existing);
2618 0 : return false;
2619 :
2620 0 : default:
2621 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
2622 : }
2623 :
2624 : /* Success, the tuple is locked. */
2625 :
2626 : /*
2627 : * Verify that the tuple is visible to our MVCC snapshot if the current
2628 : * isolation level mandates that.
2629 : *
2630 : * It's not sufficient to rely on the check within ExecUpdate() as e.g.
2631 : * CONFLICT ... WHERE clause may prevent us from reaching that.
2632 : *
2633 : * This means we only ever continue when a new command in the current
2634 : * transaction could see the row, even though in READ COMMITTED mode the
2635 : * tuple will not be visible according to the current statement's
2636 : * snapshot. This is in line with the way UPDATE deals with newer tuple
2637 : * versions.
2638 : */
2639 5176 : ExecCheckTupleVisible(context->estate, relation, existing);
2640 :
2641 : /*
2642 : * Make tuple and any needed join variables available to ExecQual and
2643 : * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
2644 : * the target's existing tuple is installed in the scantuple. EXCLUDED
2645 : * has been made to reference INNER_VAR in setrefs.c, but there is no
2646 : * other redirection.
2647 : */
2648 5176 : econtext->ecxt_scantuple = existing;
2649 5176 : econtext->ecxt_innertuple = excludedSlot;
2650 5176 : econtext->ecxt_outertuple = NULL;
2651 :
2652 5176 : if (!ExecQual(onConflictSetWhere, econtext))
2653 : {
2654 32 : ExecClearTuple(existing); /* see return below */
2655 32 : InstrCountFiltered1(&mtstate->ps, 1);
2656 32 : return true; /* done with the tuple */
2657 : }
2658 :
2659 5144 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2660 : {
2661 : /*
2662 : * Check target's existing tuple against UPDATE-applicable USING
2663 : * security barrier quals (if any), enforced here as RLS checks/WCOs.
2664 : *
2665 : * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
2666 : * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
2667 : * but that's almost the extent of its special handling for ON
2668 : * CONFLICT DO UPDATE.
2669 : *
2670 : * The rewriter will also have associated UPDATE applicable straight
2671 : * RLS checks/WCOs for the benefit of the ExecUpdate() call that
2672 : * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
2673 : * kinds, so there is no danger of spurious over-enforcement in the
2674 : * INSERT or UPDATE path.
2675 : */
2676 60 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
2677 : existing,
2678 : mtstate->ps.state);
2679 : }
2680 :
2681 : /* Project the new tuple version */
2682 5120 : ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
2683 :
2684 : /*
2685 : * Note that it is possible that the target tuple has been modified in
2686 : * this session, after the above table_tuple_lock. We choose to not error
2687 : * out in that case, in line with ExecUpdate's treatment of similar cases.
2688 : * This can happen if an UPDATE is triggered from within ExecQual(),
2689 : * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
2690 : * wCTE in the ON CONFLICT's SET.
2691 : */
2692 :
2693 : /* Execute UPDATE with projection */
2694 10210 : *returning = ExecUpdate(context, resultRelInfo,
2695 : conflictTid, NULL,
2696 5120 : resultRelInfo->ri_onConflict->oc_ProjSlot,
2697 : canSetTag);
2698 :
2699 : /*
2700 : * Clear out existing tuple, as there might not be another conflict among
2701 : * the next input rows. Don't want to hold resources till the end of the
2702 : * query.
2703 : */
2704 5090 : ExecClearTuple(existing);
2705 5090 : return true;
2706 : }
2707 :
2708 : /*
2709 : * Perform MERGE for one candidate row pair produced by the source/target join.
2710 : */
2711 : static TupleTableSlot *
2712 7012 : ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2713 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
2714 : {
2715 7012 : TupleTableSlot *rslot = NULL;
2716 : bool matched;
2717 :
2718 : /*-----
2719 : * If we are dealing with a WHEN MATCHED case, tupleid or oldtuple is
2720 : * valid, depending on whether the result relation is a table or a view.
2721 : * We execute the first action for which the additional WHEN MATCHED AND
2722 : * quals pass. If an action without quals is found, that action is
2723 : * executed.
2724 : *
2725 : * Similarly, in the WHEN NOT MATCHED BY SOURCE case, tupleid or oldtuple
2726 : * is valid, and we look at the given WHEN NOT MATCHED BY SOURCE actions
2727 : * in sequence until one passes. This is almost identical to the WHEN
2728 : * MATCHED case, and both cases are handled by ExecMergeMatched().
2729 : *
2730 : * Finally, in the WHEN NOT MATCHED [BY TARGET] case, both tupleid and
2731 : * oldtuple are invalid, and we look at the given WHEN NOT MATCHED [BY
2732 : * TARGET] actions in sequence until one passes.
2733 : *
2734 : * Things get interesting in case of concurrent update/delete of the
2735 : * target tuple. Such concurrent update/delete is detected while we are
2736 : * executing a WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action.
2737 : *
2738 : * A concurrent update can:
2739 : *
2740 : * 1. modify the target tuple so that the results from checking any
2741 : * additional quals attached to WHEN MATCHED or WHEN NOT MATCHED BY
2742 : * SOURCE actions potentially change, but the result from the join
2743 : * quals does not change.
2744 : *
2745 : * In this case, we are still dealing with the same kind of match
2746 : * (MATCHED or NOT MATCHED BY SOURCE). We recheck the same list of
2747 : * actions from the start and choose the first one that satisfies the
2748 : * new target tuple.
2749 : *
2750 : * 2. modify the target tuple in the WHEN MATCHED case so that the join
2751 : * quals no longer pass and hence the source and target tuples no
2752 : * longer match.
2753 : *
2754 : * In this case, we are now dealing with a NOT MATCHED case, and we
2755 : * process both WHEN NOT MATCHED BY SOURCE and WHEN NOT MATCHED [BY
2756 : * TARGET] actions. First ExecMergeMatched() processes the list of
2757 : * WHEN NOT MATCHED BY SOURCE actions in sequence until one passes,
2758 : * then ExecMergeNotMatched() processes any WHEN NOT MATCHED [BY
2759 : * TARGET] actions in sequence until one passes. Thus we may execute
2760 : * two actions; one of each kind.
2761 : *
2762 : * Thus we support concurrent updates that turn MATCHED candidate rows
2763 : * into NOT MATCHED rows. However, we do not attempt to support cases
2764 : * that would turn NOT MATCHED rows into MATCHED rows, or which would
2765 : * cause a target row to match a different source row.
2766 : *
2767 : * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED
2768 : * [BY TARGET].
2769 : *
2770 : * ExecMergeMatched() takes care of following the update chain and
2771 : * re-finding the qualifying WHEN MATCHED or WHEN NOT MATCHED BY SOURCE
2772 : * action, as long as the target tuple still exists. If the target tuple
2773 : * gets deleted or a concurrent update causes the join quals to fail, it
2774 : * returns a matched status of false and we call ExecMergeNotMatched().
2775 : * Given that ExecMergeMatched() always makes progress by following the
2776 : * update chain and we never switch from ExecMergeNotMatched() to
2777 : * ExecMergeMatched(), there is no risk of a livelock.
2778 : */
2779 7012 : matched = tupleid != NULL || oldtuple != NULL;
2780 7012 : if (matched)
2781 4392 : rslot = ExecMergeMatched(context, resultRelInfo, tupleid, oldtuple,
2782 : canSetTag, &matched);
2783 :
2784 : /*
2785 : * Deal with the NOT MATCHED case (either a NOT MATCHED tuple from the
2786 : * join, or a previously MATCHED tuple for which ExecMergeMatched() set
2787 : * "matched" to false, indicating that it no longer matches).
2788 : */
2789 6922 : if (!matched)
2790 : {
2791 : /*
2792 : * If a concurrent update turned a MATCHED case into a NOT MATCHED
2793 : * case, and we have both WHEN NOT MATCHED BY SOURCE and WHEN NOT
2794 : * MATCHED [BY TARGET] actions, and there is a RETURNING clause,
2795 : * ExecMergeMatched() may have already executed a WHEN NOT MATCHED BY
2796 : * SOURCE action, and computed the row to return. If so, we cannot
2797 : * execute a WHEN NOT MATCHED [BY TARGET] action now, so mark it as
2798 : * pending (to be processed on the next call to ExecModifyTable()).
2799 : * Otherwise, just process the action now.
2800 : */
2801 2636 : if (rslot == NULL)
2802 2634 : rslot = ExecMergeNotMatched(context, resultRelInfo, canSetTag);
2803 : else
2804 2 : context->mtstate->mt_merge_pending_not_matched = context->planSlot;
2805 : }
2806 :
2807 6868 : return rslot;
2808 : }
2809 :
2810 : /*
2811 : * Check and execute the first qualifying MATCHED or NOT MATCHED BY SOURCE
2812 : * action, depending on whether the join quals are satisfied. If the target
2813 : * relation is a table, the current target tuple is identified by tupleid.
2814 : * Otherwise, if the target relation is a view, oldtuple is the current target
2815 : * tuple from the view.
2816 : *
2817 : * We start from the first WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action
2818 : * and check if the WHEN quals pass, if any. If the WHEN quals for the first
2819 : * action do not pass, we check the second, then the third and so on. If we
2820 : * reach the end without finding a qualifying action, we return NULL.
2821 : * Otherwise, we execute the qualifying action and return its RETURNING
2822 : * result, if any, or NULL.
2823 : *
2824 : * On entry, "*matched" is assumed to be true. If a concurrent update or
2825 : * delete is detected that causes the join quals to no longer pass, we set it
2826 : * to false, indicating that the caller should process any NOT MATCHED [BY
2827 : * TARGET] actions.
2828 : *
2829 : * After a concurrent update, we restart from the first action to look for a
2830 : * new qualifying action to execute. If the join quals originally passed, and
2831 : * the concurrent update caused them to no longer pass, then we switch from
2832 : * the MATCHED to the NOT MATCHED BY SOURCE list of actions before restarting
2833 : * (and setting "*matched" to false). As a result we may execute a WHEN NOT
2834 : * MATCHED BY SOURCE action, and set "*matched" to false, causing the caller
2835 : * to also execute a WHEN NOT MATCHED [BY TARGET] action.
2836 : */
2837 : static TupleTableSlot *
2838 4392 : ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2839 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag,
2840 : bool *matched)
2841 : {
2842 4392 : ModifyTableState *mtstate = context->mtstate;
2843 4392 : List **mergeActions = resultRelInfo->ri_MergeActions;
2844 : List *actionStates;
2845 4392 : TupleTableSlot *newslot = NULL;
2846 4392 : TupleTableSlot *rslot = NULL;
2847 4392 : EState *estate = context->estate;
2848 4392 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2849 : bool isNull;
2850 4392 : EPQState *epqstate = &mtstate->mt_epqstate;
2851 : ListCell *l;
2852 :
2853 : /* Per the contract above, the caller must pass *matched as true on entry */
2854 : Assert(*matched);
2855 :
2856 : /*
2857 : * If there are no WHEN MATCHED or WHEN NOT MATCHED BY SOURCE actions, we
2858 : * are done.
2859 : */
2860 4392 : if (mergeActions[MERGE_WHEN_MATCHED] == NIL &&
2861 1200 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] == NIL)
2862 528 : return NULL;
2863 :
2864 : /*
2865 : * Make tuple and any needed join variables available to ExecQual and
2866 : * ExecProject. The target's existing tuple is installed in the scantuple.
2867 : * This target relation's slot is required only in the case of a MATCHED
2868 : * or NOT MATCHED BY SOURCE tuple and UPDATE/DELETE actions.
2869 : */
2870 3864 : econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
2871 3864 : econtext->ecxt_innertuple = context->planSlot;
2872 3864 : econtext->ecxt_outertuple = NULL;
2873 :
2874 : /*
2875 : * This routine is only invoked for matched target rows, so we should
2876 : * either have the tupleid of the target row, or an old tuple from the
2877 : * target wholerow junk attr.
2878 : */
2879 : Assert(tupleid != NULL || oldtuple != NULL);
2880 3864 : if (oldtuple != NULL)
2881 84 : ExecForceStoreHeapTuple(oldtuple, resultRelInfo->ri_oldTupleSlot,
2882 : false);
2883 3780 : else if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
2884 : tupleid,
2885 : SnapshotAny,
2886 : resultRelInfo->ri_oldTupleSlot))
2887 0 : elog(ERROR, "failed to fetch the target tuple");
2888 :
2889 : /*
2890 : * Test the join condition. If it's satisfied, perform a MATCHED action.
2891 : * Otherwise, perform a NOT MATCHED BY SOURCE action.
2892 : *
2893 : * Note that this join condition will be NULL if there are no NOT MATCHED
2894 : * BY SOURCE actions --- see transform_MERGE_to_join(). In that case, we
2895 : * need only consider MATCHED actions here.
2896 : */
2897 3864 : if (ExecQual(resultRelInfo->ri_MergeJoinCondition, econtext))
2898 3694 : actionStates = mergeActions[MERGE_WHEN_MATCHED];
2899 : else
2900 170 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
2901 :
2902 3864 : lmerge_matched:
2903 :
2904 5392 : foreach(l, actionStates)
2905 : {
2906 3990 : MergeActionState *relaction = (MergeActionState *) lfirst(l);
2907 3990 : CmdType commandType = relaction->mas_action->commandType;
2908 : TM_Result result;
2909 3990 : UpdateContext updateCxt = {0};
2910 :
2911 : /*
2912 : * Test condition, if any.
2913 : *
2914 : * In the absence of any condition, we perform the action
2915 : * unconditionally (no need to check separately since ExecQual() will
2916 : * return true if there are no conditions to evaluate).
2917 : */
2918 3990 : if (!ExecQual(relaction->mas_whenqual, econtext))
2919 1466 : continue;
2920 :
2921 : /*
2922 : * Check if the existing target tuple meets the USING checks of
2923 : * UPDATE/DELETE RLS policies. If those checks fail, we throw an
2924 : * error.
2925 : *
2926 : * The WITH CHECK quals for UPDATE RLS policies are applied in
2927 : * ExecUpdateAct() and hence we need not do anything special to handle
2928 : * them.
2929 : *
2930 : * NOTE: We must do this after WHEN quals are evaluated, so that we
2931 : * check policies only when they matter.
2932 : */
2933 2524 : if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
2934 : {
2935 90 : ExecWithCheckOptions(commandType == CMD_UPDATE ?
2936 : WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
2937 : resultRelInfo,
2938 : resultRelInfo->ri_oldTupleSlot,
2939 90 : context->mtstate->ps.state);
2940 : }
2941 :
2942 : /* Perform stated action */
2943 2500 : switch (commandType)
2944 : {
2945 2106 : case CMD_UPDATE:
2946 :
2947 : /*
2948 : * Project the output tuple, and use that to update the table.
2949 : * We don't need to filter out junk attributes, because the
2950 : * UPDATE action's targetlist doesn't have any.
2951 : */
2952 2106 : newslot = ExecProject(relaction->mas_proj);
2953 :
2954 2106 : mtstate->mt_merge_action = relaction;
2955 2106 : if (!ExecUpdatePrologue(context, resultRelInfo,
2956 : tupleid, NULL, newslot, &result))
2957 : {
2958 18 : if (result == TM_Ok)
2959 156 : return NULL; /* "do nothing" */
2960 :
2961 12 : break; /* concurrent update/delete */
2962 : }
2963 :
2964 : /* INSTEAD OF ROW UPDATE Triggers */
2965 2088 : if (resultRelInfo->ri_TrigDesc &&
2966 334 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2967 : {
2968 78 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2969 : oldtuple, newslot))
2970 0 : return NULL; /* "do nothing" */
2971 : }
2972 : else
2973 : {
2974 2010 : result = ExecUpdateAct(context, resultRelInfo, tupleid,
2975 : NULL, newslot, canSetTag,
2976 : &updateCxt);
2977 :
2978 : /*
2979 : * As in ExecUpdate(), if ExecUpdateAct() reports that a
2980 : * cross-partition update was done, then there's nothing
2981 : * else for us to do --- the UPDATE has been turned into a
2982 : * DELETE and an INSERT, and we must not perform any of
2983 : * the usual post-update tasks. Also, the RETURNING tuple
2984 : * (if any) has been projected, so we can just return
2985 : * that.
2986 : */
2987 1990 : if (updateCxt.crossPartUpdate)
2988 : {
2989 134 : mtstate->mt_merge_updated += 1;
2990 134 : return context->cpUpdateReturningSlot;
2991 : }
2992 : }
2993 :
2994 1934 : if (result == TM_Ok)
2995 : {
2996 1864 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
2997 : tupleid, NULL, newslot);
2998 1852 : mtstate->mt_merge_updated += 1;
2999 : }
3000 1922 : break;
3001 :
3002 376 : case CMD_DELETE:
3003 376 : mtstate->mt_merge_action = relaction;
3004 376 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
3005 : NULL, NULL, &result))
3006 : {
3007 12 : if (result == TM_Ok)
3008 6 : return NULL; /* "do nothing" */
3009 :
3010 6 : break; /* concurrent update/delete */
3011 : }
3012 :
3013 : /* INSTEAD OF ROW DELETE Triggers */
3014 364 : if (resultRelInfo->ri_TrigDesc &&
3015 44 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
3016 : {
3017 6 : if (!ExecIRDeleteTriggers(estate, resultRelInfo,
3018 : oldtuple))
3019 0 : return NULL; /* "do nothing" */
3020 : }
3021 : else
3022 358 : result = ExecDeleteAct(context, resultRelInfo, tupleid,
3023 : false);
3024 :
3025 364 : if (result == TM_Ok)
3026 : {
3027 346 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
3028 : false);
3029 346 : mtstate->mt_merge_deleted += 1;
3030 : }
3031 364 : break;
3032 :
3033 18 : case CMD_NOTHING:
3034 : /* Doing nothing is always OK */
3035 18 : result = TM_Ok;
3036 18 : break;
3037 :
3038 0 : default:
3039 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3040 : }
3041 :
3042 2322 : switch (result)
3043 : {
3044 2216 : case TM_Ok:
3045 : /* all good; perform final actions */
3046 2216 : if (canSetTag && commandType != CMD_NOTHING)
3047 2180 : (estate->es_processed)++;
3048 :
3049 2216 : break;
3050 :
3051 32 : case TM_SelfModified:
3052 :
3053 : /*
3054 : * The target tuple was already updated or deleted by the
3055 : * current command, or by a later command in the current
3056 : * transaction. The former case is explicitly disallowed by
3057 : * the SQL standard for MERGE, which insists that the MERGE
3058 : * join condition should not join a target row to more than
3059 : * one source row.
3060 : *
3061 : * The latter case arises if the tuple is modified by a
3062 : * command in a BEFORE trigger, or perhaps by a command in a
3063 : * volatile function used in the query. In such situations we
3064 : * should not ignore the MERGE action, but it is equally
3065 : * unsafe to proceed. We don't want to discard the original
3066 : * MERGE action while keeping the triggered actions based on
3067 : * it; and it would be no better to allow the original MERGE
3068 : * action while discarding the updates that it triggered. So
3069 : * throwing an error is the only safe course.
3070 : */
3071 32 : if (context->tmfd.cmax != estate->es_output_cid)
3072 12 : ereport(ERROR,
3073 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3074 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3075 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3076 :
3077 20 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3078 20 : ereport(ERROR,
3079 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3080 : /* translator: %s is a SQL command name */
3081 : errmsg("%s command cannot affect row a second time",
3082 : "MERGE"),
3083 : errhint("Ensure that not more than one source row matches any one target row.")));
3084 :
3085 : /* This shouldn't happen */
3086 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3087 : break;
3088 :
3089 10 : case TM_Deleted:
3090 10 : if (IsolationUsesXactSnapshot())
3091 0 : ereport(ERROR,
3092 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3093 : errmsg("could not serialize access due to concurrent delete")));
3094 :
3095 : /*
3096 : * If the tuple was already deleted, set matched to false to
3097 : * let caller handle it under NOT MATCHED [BY TARGET] clauses.
3098 : */
3099 10 : *matched = false;
3100 10 : return NULL;
3101 :
3102 64 : case TM_Updated:
3103 : {
3104 : bool was_matched;
3105 : Relation resultRelationDesc;
3106 : TupleTableSlot *epqslot,
3107 : *inputslot;
3108 : LockTupleMode lockmode;
3109 :
3110 : /*
3111 : * The target tuple was concurrently updated by some other
3112 : * transaction. If we are currently processing a MATCHED
3113 : * action, use EvalPlanQual() with the new version of the
3114 : * tuple and recheck the join qual, to detect a change
3115 : * from the MATCHED to the NOT MATCHED cases. If we are
3116 : * already processing a NOT MATCHED BY SOURCE action, we
3117 : * skip this (cannot switch from NOT MATCHED BY SOURCE to
3118 : * MATCHED).
3119 : */
3120 64 : was_matched = relaction->mas_action->matchKind == MERGE_WHEN_MATCHED;
3121 64 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
3122 64 : lockmode = ExecUpdateLockMode(estate, resultRelInfo);
3123 :
3124 64 : if (was_matched)
3125 64 : inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
3126 : resultRelInfo->ri_RangeTableIndex);
3127 : else
3128 0 : inputslot = resultRelInfo->ri_oldTupleSlot;
3129 :
3130 64 : result = table_tuple_lock(resultRelationDesc, tupleid,
3131 : estate->es_snapshot,
3132 : inputslot, estate->es_output_cid,
3133 : lockmode, LockWaitBlock,
3134 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
3135 : &context->tmfd);
3136 64 : switch (result)
3137 : {
3138 62 : case TM_Ok:
3139 :
3140 : /*
3141 : * If the tuple was updated and migrated to
3142 : * another partition concurrently, the current
3143 : * MERGE implementation can't follow. There's
3144 : * probably a better way to handle this case, but
3145 : * it'd require recognizing the relation to which
3146 : * the tuple moved, and setting our current
3147 : * resultRelInfo to that.
3148 : */
3149 62 : if (ItemPointerIndicatesMovedPartitions(&context->tmfd.ctid))
3150 0 : ereport(ERROR,
3151 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3152 : errmsg("tuple to be merged was already moved to another partition due to concurrent update")));
3153 :
3154 : /*
3155 : * If this was a MATCHED case, use EvalPlanQual()
3156 : * to recheck the join condition.
3157 : */
3158 62 : if (was_matched)
3159 : {
3160 62 : epqslot = EvalPlanQual(epqstate,
3161 : resultRelationDesc,
3162 : resultRelInfo->ri_RangeTableIndex,
3163 : inputslot);
3164 :
3165 : /*
3166 : * If the subplan didn't return a tuple, then
3167 : * we must be dealing with an inner join for
3168 : * which the join condition no longer matches.
3169 : * This can only happen if there are no NOT
3170 : * MATCHED actions, and so there is nothing
3171 : * more to do.
3172 : */
3173 62 : if (TupIsNull(epqslot))
3174 0 : return NULL;
3175 :
3176 : /*
3177 : * If we got a NULL ctid from the subplan, the
3178 : * join quals no longer pass and we switch to
3179 : * the NOT MATCHED BY SOURCE case.
3180 : */
3181 62 : (void) ExecGetJunkAttribute(epqslot,
3182 62 : resultRelInfo->ri_RowIdAttNo,
3183 : &isNull);
3184 62 : if (isNull)
3185 4 : *matched = false;
3186 :
3187 : /*
3188 : * Otherwise, recheck the join quals to see if
3189 : * we need to switch to the NOT MATCHED BY
3190 : * SOURCE case.
3191 : */
3192 62 : if (!table_tuple_fetch_row_version(resultRelationDesc,
3193 : &context->tmfd.ctid,
3194 : SnapshotAny,
3195 : resultRelInfo->ri_oldTupleSlot))
3196 0 : elog(ERROR, "failed to fetch the target tuple");
3197 :
3198 62 : if (*matched)
3199 58 : *matched = ExecQual(resultRelInfo->ri_MergeJoinCondition,
3200 : econtext);
3201 :
3202 : /* Switch lists, if necessary */
3203 62 : if (!*matched)
3204 6 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3205 : }
3206 :
3207 : /*
3208 : * Loop back and process the MATCHED or NOT
3209 : * MATCHED BY SOURCE actions from the start.
3210 : */
3211 62 : goto lmerge_matched;
3212 :
3213 0 : case TM_Deleted:
3214 :
3215 : /*
3216 : * tuple already deleted; tell caller to run NOT
3217 : * MATCHED [BY TARGET] actions
3218 : */
3219 0 : *matched = false;
3220 0 : return NULL;
3221 :
3222 2 : case TM_SelfModified:
3223 :
3224 : /*
3225 : * This can be reached when following an update
3226 : * chain from a tuple updated by another session,
3227 : * reaching a tuple that was already updated or
3228 : * deleted by the current command, or by a later
3229 : * command in the current transaction. As above,
3230 : * this should always be treated as an error.
3231 : */
3232 2 : if (context->tmfd.cmax != estate->es_output_cid)
3233 0 : ereport(ERROR,
3234 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3235 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3236 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3237 :
3238 2 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3239 2 : ereport(ERROR,
3240 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3241 : /* translator: %s is a SQL command name */
3242 : errmsg("%s command cannot affect row a second time",
3243 : "MERGE"),
3244 : errhint("Ensure that not more than one source row matches any one target row.")));
3245 :
3246 : /* This shouldn't happen */
3247 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3248 : return NULL;
3249 :
3250 0 : default:
3251 : /* see table_tuple_lock call in ExecDelete() */
3252 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
3253 : result);
3254 : return NULL;
3255 : }
3256 : }
3257 :
3258 0 : case TM_Invisible:
3259 : case TM_WouldBlock:
3260 : case TM_BeingModified:
3261 : /* these should not occur */
3262 0 : elog(ERROR, "unexpected tuple operation result: %d", result);
3263 : break;
3264 : }
3265 :
3266 : /* Process RETURNING if present */
3267 2216 : if (resultRelInfo->ri_projectReturning)
3268 : {
3269 240 : switch (commandType)
3270 : {
3271 144 : case CMD_UPDATE:
3272 144 : rslot = ExecProcessReturning(resultRelInfo, newslot,
3273 : context->planSlot);
3274 144 : break;
3275 :
3276 96 : case CMD_DELETE:
3277 96 : rslot = ExecProcessReturning(resultRelInfo,
3278 : resultRelInfo->ri_oldTupleSlot,
3279 : context->planSlot);
3280 96 : break;
3281 :
3282 0 : case CMD_NOTHING:
3283 0 : break;
3284 :
3285 0 : default:
3286 0 : elog(ERROR, "unrecognized commandType: %d",
3287 : (int) commandType);
3288 : }
3289 : }
3290 :
3291 : /*
3292 : * We've activated one of the WHEN clauses, so we don't search
3293 : * further. This is required behaviour, not an optimization.
3294 : */
3295 2216 : break;
3296 : }
3297 :
3298 : /*
3299 : * Successfully executed an action or no qualifying action was found.
3300 : */
3301 3618 : return rslot;
3302 : }
3303 :
3304 : /*
3305 : * Execute the first qualifying NOT MATCHED [BY TARGET] action; returns its
3306 : * RETURNING result, if any, or NULL.
3307 : */
3308 : static TupleTableSlot *
3309 2636 : ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3310 : bool canSetTag)
3311 : {
3312 2636 : ModifyTableState *mtstate = context->mtstate;
3313 2636 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3314 : List *actionStates;
3315 2636 : TupleTableSlot *rslot = NULL;
3316 : ListCell *l;
3317 :
3318 : /*
3319 : * For INSERT actions, the root relation's merge action is OK since the
3320 : * INSERT's targetlist and the WHEN conditions can only refer to the
3321 : * source relation and hence it does not matter which result relation we
3322 : * work with.
3323 : *
3324 : * XXX does this mean that we can avoid creating copies of actionStates on
3325 : * partitioned tables, for not-matched actions?
3326 : */
3327 2636 : actionStates = resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET];
3328 :
3329 : /*
3330 : * Make source tuple available to ExecQual and ExecProject. We don't need
3331 : * the target tuple, since the WHEN quals and targetlist can't refer to
3332 : * the target columns.
3333 : */
3334 2636 : econtext->ecxt_scantuple = NULL;
3335 2636 : econtext->ecxt_innertuple = context->planSlot;
3336 2636 : econtext->ecxt_outertuple = NULL;
3337 :
3338 3506 : foreach(l, actionStates)
3339 : {
3340 2636 : MergeActionState *action = (MergeActionState *) lfirst(l);
3341 2636 : CmdType commandType = action->mas_action->commandType;
3342 : TupleTableSlot *newslot;
3343 :
3344 : /*
3345 : * Test condition, if any.
3346 : *
3347 : * In the absence of any condition, we perform the action
3348 : * unconditionally (no need to check separately since ExecQual() will
3349 : * return true if there are no conditions to evaluate).
3350 : */
3351 2636 : if (!ExecQual(action->mas_whenqual, econtext))
3352 870 : continue;
3353 :
3354 : /* Perform stated action */
3355 1766 : switch (commandType)
3356 : {
3357 1766 : case CMD_INSERT:
3358 :
3359 : /*
3360 : * Project the tuple. In case of a partitioned table, the
3361 : * projection was already built to use the root's descriptor,
3362 : * so we don't need to map the tuple here.
3363 : */
3364 1766 : newslot = ExecProject(action->mas_proj);
3365 1766 : mtstate->mt_merge_action = action;
3366 :
3367 1766 : rslot = ExecInsert(context, mtstate->rootResultRelInfo,
3368 : newslot, canSetTag, NULL, NULL);
3369 1712 : mtstate->mt_merge_inserted += 1;
3370 1712 : break;
3371 0 : case CMD_NOTHING:
3372 : /* Do nothing */
3373 0 : break;
3374 0 : default:
3375 0 : elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
3376 : }
3377 :
3378 : /*
3379 : * We've activated one of the WHEN clauses, so we don't search
3380 : * further. This is required behaviour, not an optimization.
3381 : */
3382 1712 : break;
3383 : }
3384 :
3385 2582 : return rslot;
3386 : }
3386 :
/*
 * Initialize state for execution of MERGE.
 *
 * For each result relation, build a MergeActionState (WHEN qual plus, where
 * applicable, a projection) for every action in its merge action list, and
 * file it into the matching ri_MergeActions[] list keyed by MergeMatchKind.
 * Also records in mt_merge_subcommands which subcommands (INSERT / UPDATE /
 * DELETE) appear at all, so that statement-level triggers can later be fired
 * selectively.  A no-op if the plan carries no merge action lists.
 */
void
ExecInitMerge(ModifyTableState *mtstate, EState *estate)
{
	ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
	ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
	ResultRelInfo *resultRelInfo;
	ExprContext *econtext;
	ListCell   *lc;
	int			i;

	if (node->mergeActionLists == NIL)
		return;

	mtstate->mt_merge_subcommands = 0;

	/* All WHEN quals and projections are evaluated in this expr context. */
	if (mtstate->ps.ps_ExprContext == NULL)
		ExecAssignExprContext(estate, &mtstate->ps);
	econtext = mtstate->ps.ps_ExprContext;

	/*
	 * Create a MergeActionState for each action on the mergeActionList and
	 * add it to either a list of matched actions or not-matched actions.
	 *
	 * Similar logic appears in ExecInitPartitionInfo(), so if changing
	 * anything here, do so there too.
	 */
	i = 0;
	foreach(lc, node->mergeActionLists)
	{
		List	   *mergeActionList = lfirst(lc);
		Node	   *joinCondition;
		TupleDesc	relationDesc;
		ListCell   *l;

		/*
		 * 'i' indexes node->mergeActionLists, node->mergeJoinConditions and
		 * mtstate->resultRelInfo in parallel: one entry per result relation.
		 */
		joinCondition = (Node *) list_nth(node->mergeJoinConditions, i);
		resultRelInfo = mtstate->resultRelInfo + i;
		i++;
		relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);

		/* initialize slots for MERGE fetches from this rel */
		if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
			ExecInitMergeTupleSlots(mtstate, resultRelInfo);

		/* initialize state for join condition checking */
		resultRelInfo->ri_MergeJoinCondition =
			ExecInitQual((List *) joinCondition, &mtstate->ps);

		foreach(l, mergeActionList)
		{
			MergeAction *action = (MergeAction *) lfirst(l);
			MergeActionState *action_state;
			TupleTableSlot *tgtslot;
			TupleDesc	tgtdesc;

			/*
			 * Build action merge state for this rel.  (For partitions,
			 * equivalent code exists in ExecInitPartitionInfo.)
			 */
			action_state = makeNode(MergeActionState);
			action_state->mas_action = action;
			action_state->mas_whenqual = ExecInitQual((List *) action->qual,
													  &mtstate->ps);

			/*
			 * We create three lists - one for each MergeMatchKind - and stick
			 * the MergeActionState into the appropriate list.
			 */
			resultRelInfo->ri_MergeActions[action->matchKind] =
				lappend(resultRelInfo->ri_MergeActions[action->matchKind],
						action_state);

			switch (action->commandType)
			{
				case CMD_INSERT:
					ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
										action->targetList);

					/*
					 * If the MERGE targets a partitioned table, any INSERT
					 * actions must be routed through it, not the child
					 * relations. Initialize the routing struct and the root
					 * table's "new" tuple slot for that, if not already done.
					 * The projection we prepare, for all relations, uses the
					 * root relation descriptor, and targets the plan's root
					 * slot.  (This is consistent with the fact that we
					 * checked the plan output to match the root relation,
					 * above.)
					 */
					if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
						RELKIND_PARTITIONED_TABLE)
					{
						if (mtstate->mt_partition_tuple_routing == NULL)
						{
							/*
							 * Initialize planstate for routing if not already
							 * done.
							 *
							 * Note that the slot is managed as a standalone
							 * slot belonging to ModifyTableState, so we pass
							 * NULL for the 2nd argument.
							 */
							mtstate->mt_root_tuple_slot =
								table_slot_create(rootRelInfo->ri_RelationDesc,
												  NULL);
							mtstate->mt_partition_tuple_routing =
								ExecSetupPartitionTupleRouting(estate,
															   rootRelInfo->ri_RelationDesc);
						}
						tgtslot = mtstate->mt_root_tuple_slot;
						tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
					}
					else
					{
						/* not partitioned? use the stock relation and slot */
						tgtslot = resultRelInfo->ri_newTupleSlot;
						tgtdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
					}

					action_state->mas_proj =
						ExecBuildProjectionInfo(action->targetList, econtext,
												tgtslot,
												&mtstate->ps,
												tgtdesc);

					mtstate->mt_merge_subcommands |= MERGE_INSERT;
					break;
				case CMD_UPDATE:
					action_state->mas_proj =
						ExecBuildUpdateProjection(action->targetList,
												  true,
												  action->updateColnos,
												  relationDesc,
												  econtext,
												  resultRelInfo->ri_newTupleSlot,
												  &mtstate->ps);
					mtstate->mt_merge_subcommands |= MERGE_UPDATE;
					break;
				case CMD_DELETE:
					/* DELETE needs no projection; just note its presence. */
					mtstate->mt_merge_subcommands |= MERGE_DELETE;
					break;
				case CMD_NOTHING:
					break;
				default:
					elog(ERROR, "unknown operation");
					break;
			}
		}
	}
}
3539 :
3540 : /*
3541 : * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
3542 : *
3543 : * We mark 'projectNewInfoValid' even though the projections themselves
3544 : * are not initialized here.
3545 : */
3546 : void
3547 1628 : ExecInitMergeTupleSlots(ModifyTableState *mtstate,
3548 : ResultRelInfo *resultRelInfo)
3549 : {
3550 1628 : EState *estate = mtstate->ps.state;
3551 :
3552 : Assert(!resultRelInfo->ri_projectNewInfoValid);
3553 :
3554 1628 : resultRelInfo->ri_oldTupleSlot =
3555 1628 : table_slot_create(resultRelInfo->ri_RelationDesc,
3556 : &estate->es_tupleTable);
3557 1628 : resultRelInfo->ri_newTupleSlot =
3558 1628 : table_slot_create(resultRelInfo->ri_RelationDesc,
3559 : &estate->es_tupleTable);
3560 1628 : resultRelInfo->ri_projectNewInfoValid = true;
3561 1628 : }
3562 :
/*
 * Process BEFORE EACH STATEMENT triggers
 *
 * Fired once per statement, on the root result relation (not on any
 * individual partition or child table).  INSERT ... ON CONFLICT DO UPDATE
 * fires both INSERT and UPDATE statement triggers; MERGE fires statement
 * triggers only for the subcommand kinds that actually appear in its WHEN
 * clauses (as recorded in mt_merge_subcommands by ExecInitMerge).
 */
static void
fireBSTriggers(ModifyTableState *node)
{
	ModifyTable *plan = (ModifyTable *) node->ps.plan;
	ResultRelInfo *resultRelInfo = node->rootResultRelInfo;

	switch (node->operation)
	{
		case CMD_INSERT:
			ExecBSInsertTriggers(node->ps.state, resultRelInfo);
			/* ON CONFLICT DO UPDATE also fires BEFORE UPDATE triggers */
			if (plan->onConflictAction == ONCONFLICT_UPDATE)
				ExecBSUpdateTriggers(node->ps.state,
									 resultRelInfo);
			break;
		case CMD_UPDATE:
			ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
			break;
		case CMD_DELETE:
			ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
			break;
		case CMD_MERGE:
			if (node->mt_merge_subcommands & MERGE_INSERT)
				ExecBSInsertTriggers(node->ps.state, resultRelInfo);
			if (node->mt_merge_subcommands & MERGE_UPDATE)
				ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
			if (node->mt_merge_subcommands & MERGE_DELETE)
				ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
			break;
		default:
			elog(ERROR, "unknown operation");
			break;
	}
}
3599 :
/*
 * Process AFTER EACH STATEMENT triggers
 *
 * Mirrors fireBSTriggers, but note that the firing order within each case
 * is deliberately the reverse of the BEFORE-trigger order (e.g. MERGE fires
 * DELETE, then UPDATE, then INSERT) -- presumably to nest the AFTER triggers
 * inside the BEFORE ones; keep the two functions in sync if changing either.
 * Transition-table capture state is passed along where applicable.
 */
static void
fireASTriggers(ModifyTableState *node)
{
	ModifyTable *plan = (ModifyTable *) node->ps.plan;
	ResultRelInfo *resultRelInfo = node->rootResultRelInfo;

	switch (node->operation)
	{
		case CMD_INSERT:
			/* ON CONFLICT DO UPDATE uses its own capture state for UPDATEs */
			if (plan->onConflictAction == ONCONFLICT_UPDATE)
				ExecASUpdateTriggers(node->ps.state,
									 resultRelInfo,
									 node->mt_oc_transition_capture);
			ExecASInsertTriggers(node->ps.state, resultRelInfo,
								 node->mt_transition_capture);
			break;
		case CMD_UPDATE:
			ExecASUpdateTriggers(node->ps.state, resultRelInfo,
								 node->mt_transition_capture);
			break;
		case CMD_DELETE:
			ExecASDeleteTriggers(node->ps.state, resultRelInfo,
								 node->mt_transition_capture);
			break;
		case CMD_MERGE:
			if (node->mt_merge_subcommands & MERGE_DELETE)
				ExecASDeleteTriggers(node->ps.state, resultRelInfo,
									 node->mt_transition_capture);
			if (node->mt_merge_subcommands & MERGE_UPDATE)
				ExecASUpdateTriggers(node->ps.state, resultRelInfo,
									 node->mt_transition_capture);
			if (node->mt_merge_subcommands & MERGE_INSERT)
				ExecASInsertTriggers(node->ps.state, resultRelInfo,
									 node->mt_transition_capture);
			break;
		default:
			elog(ERROR, "unknown operation");
			break;
	}
}
3643 :
3644 : /*
3645 : * Set up the state needed for collecting transition tuples for AFTER
3646 : * triggers.
3647 : */
3648 : static void
3649 117430 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
3650 : {
3651 117430 : ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
3652 117430 : ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
3653 :
3654 : /* Check for transition tables on the directly targeted relation. */
3655 117430 : mtstate->mt_transition_capture =
3656 117430 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3657 117430 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3658 : mtstate->operation);
3659 117430 : if (plan->operation == CMD_INSERT &&
3660 90802 : plan->onConflictAction == ONCONFLICT_UPDATE)
3661 828 : mtstate->mt_oc_transition_capture =
3662 828 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3663 828 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3664 : CMD_UPDATE);
3665 117430 : }
3666 :
/*
 * ExecPrepareTupleRouting --- prepare for routing one tuple
 *
 * Determine the partition in which the tuple in slot is to be inserted,
 * and return its ResultRelInfo in *partRelInfo.  The return value is
 * a slot holding the tuple of the partition rowtype.  (If no rowtype
 * conversion is needed, the input slot itself is returned.)
 *
 * This also sets the transition table information in mtstate based on the
 * selected partition.
 */
static TupleTableSlot *
ExecPrepareTupleRouting(ModifyTableState *mtstate,
						EState *estate,
						PartitionTupleRouting *proute,
						ResultRelInfo *targetRelInfo,
						TupleTableSlot *slot,
						ResultRelInfo **partRelInfo)
{
	ResultRelInfo *partrel;
	TupleConversionMap *map;

	/*
	 * Lookup the target partition's ResultRelInfo.  If ExecFindPartition does
	 * not find a valid partition for the tuple in 'slot' then an error is
	 * raised.  An error may also be raised if the found partition is not a
	 * valid target for INSERTs.  This is required since a partitioned table
	 * UPDATE to another partition becomes a DELETE+INSERT.
	 */
	partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);

	/*
	 * If we're capturing transition tuples, we might need to convert from the
	 * partition rowtype to root partitioned table's rowtype.  But if there
	 * are no BEFORE triggers on the partition that could change the tuple, we
	 * can just remember the original unconverted tuple to avoid a needless
	 * round trip conversion.
	 */
	if (mtstate->mt_transition_capture != NULL)
	{
		bool		has_before_insert_row_trig;

		has_before_insert_row_trig = (partrel->ri_TrigDesc &&
									  partrel->ri_TrigDesc->trig_insert_before_row);

		/*
		 * NULL here tells the capture machinery it cannot reuse 'slot' and
		 * must convert back from the partition rowtype after triggers run.
		 */
		mtstate->mt_transition_capture->tcs_original_insert_tuple =
			!has_before_insert_row_trig ? slot : NULL;
	}

	/*
	 * Convert the tuple, if necessary.  A NULL map means the partition's
	 * rowtype is physically compatible with the root's, so no conversion.
	 */
	map = ExecGetRootToChildMap(partrel, estate);
	if (map != NULL)
	{
		TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;

		slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
	}

	*partRelInfo = partrel;
	return slot;
}
3729 :
/* ----------------------------------------------------------------
 *		ExecModifyTable
 *
 *		Perform table modifications as required, and return RETURNING results
 *		if needed.
 *
 *		Without RETURNING, this loops over the entire subplan and returns
 *		NULL once all rows are processed.  With RETURNING, it returns one
 *		result slot per call and resumes the loop on the next call.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecModifyTable(PlanState *pstate)
{
	ModifyTableState *node = castNode(ModifyTableState, pstate);
	ModifyTableContext context;
	EState	   *estate = node->ps.state;
	CmdType		operation = node->operation;
	ResultRelInfo *resultRelInfo;
	PlanState  *subplanstate;
	TupleTableSlot *slot;
	TupleTableSlot *oldSlot;
	ItemPointerData tuple_ctid;
	HeapTupleData oldtupdata;
	HeapTuple	oldtuple;
	ItemPointer tupleid;

	CHECK_FOR_INTERRUPTS();

	/*
	 * This should NOT get called during EvalPlanQual; we should have passed a
	 * subplan tree to EvalPlanQual, instead.  Use a runtime test not just
	 * Assert because this condition is easy to miss in testing.  (Note:
	 * although ModifyTable should not get executed within an EvalPlanQual
	 * operation, we do have to allow it to be initialized and shut down in
	 * case it is within a CTE subplan.  Hence this test must be here, not in
	 * ExecInitModifyTable.)
	 */
	if (estate->es_epq_active != NULL)
		elog(ERROR, "ModifyTable should not be called during EvalPlanQual");

	/*
	 * If we've already completed processing, don't try to do more.  We need
	 * this test because ExecPostprocessPlan might call us an extra time, and
	 * our subplan's nodes aren't necessarily robust against being called
	 * extra times.
	 */
	if (node->mt_done)
		return NULL;

	/*
	 * On first call, fire BEFORE STATEMENT triggers before proceeding.
	 */
	if (node->fireBSTriggers)
	{
		fireBSTriggers(node);
		node->fireBSTriggers = false;
	}

	/* Preload local variables */
	resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
	subplanstate = outerPlanState(node);

	/* Set global context */
	context.mtstate = node;
	context.epqstate = &node->mt_epqstate;
	context.estate = estate;

	/*
	 * Fetch rows from subplan, and execute the required table modification
	 * for each row.
	 */
	for (;;)
	{
		/*
		 * Reset the per-output-tuple exprcontext.  This is needed because
		 * triggers expect to use that context as workspace.  It's a bit ugly
		 * to do this below the top level of the plan, however.  We might need
		 * to rethink this later.
		 */
		ResetPerTupleExprContext(estate);

		/*
		 * Reset per-tuple memory context used for processing on conflict and
		 * returning clauses, to free any expression evaluation storage
		 * allocated in the previous cycle.
		 */
		if (pstate->ps_ExprContext)
			ResetExprContext(pstate->ps_ExprContext);

		/*
		 * If there is a pending MERGE ... WHEN NOT MATCHED [BY TARGET] action
		 * to execute, do so now --- see the comments in ExecMerge().
		 */
		if (node->mt_merge_pending_not_matched != NULL)
		{
			context.planSlot = node->mt_merge_pending_not_matched;

			slot = ExecMergeNotMatched(&context, node->resultRelInfo,
									   node->canSetTag);

			/* Clear the pending action */
			node->mt_merge_pending_not_matched = NULL;

			/*
			 * If we got a RETURNING result, return it to the caller.  We'll
			 * continue the work on next call.
			 */
			if (slot)
				return slot;

			continue;			/* continue with the next tuple */
		}

		/* Fetch the next row from subplan */
		context.planSlot = ExecProcNode(subplanstate);

		/* No more tuples to process? */
		if (TupIsNull(context.planSlot))
			break;

		/*
		 * When there are multiple result relations, each tuple contains a
		 * junk column that gives the OID of the rel from which it came.
		 * Extract it and select the correct result relation.
		 */
		if (AttributeNumberIsValid(node->mt_resultOidAttno))
		{
			Datum		datum;
			bool		isNull;
			Oid			resultoid;

			datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
										 &isNull);
			if (isNull)
			{
				/*
				 * For commands other than MERGE, any tuples having InvalidOid
				 * for tableoid are errors.  For MERGE, we may need to handle
				 * them as WHEN NOT MATCHED clauses if any, so do that.
				 *
				 * Note that we use the node's toplevel resultRelInfo, not any
				 * specific partition's.
				 */
				if (operation == CMD_MERGE)
				{
					EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);

					slot = ExecMerge(&context, node->resultRelInfo,
									 NULL, NULL, node->canSetTag);

					/*
					 * If we got a RETURNING result, return it to the caller.
					 * We'll continue the work on next call.
					 */
					if (slot)
						return slot;

					continue;	/* continue with the next tuple */
				}

				elog(ERROR, "tableoid is NULL");
			}
			resultoid = DatumGetObjectId(datum);

			/* If it's not the same as last time, we need to locate the rel */
			if (resultoid != node->mt_lastResultOid)
				resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
														 false, true);
		}

		/*
		 * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
		 * here is compute the RETURNING expressions.
		 */
		if (resultRelInfo->ri_usesFdwDirectModify)
		{
			Assert(resultRelInfo->ri_projectReturning);

			/*
			 * A scan slot containing the data that was actually inserted,
			 * updated or deleted has already been made available to
			 * ExecProcessReturning by IterateDirectModify, so no need to
			 * provide it here.
			 */
			slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot);

			return slot;
		}

		EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
		slot = context.planSlot;

		/* Row-identity outputs; filled in below for UPDATE/DELETE/MERGE. */
		tupleid = NULL;
		oldtuple = NULL;

		/*
		 * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
		 * to be updated/deleted/merged.  For a heap relation, that's a TID;
		 * otherwise we may have a wholerow junk attr that carries the old
		 * tuple in toto.  Keep this in step with the part of
		 * ExecInitModifyTable that sets up ri_RowIdAttNo.
		 */
		if (operation == CMD_UPDATE || operation == CMD_DELETE ||
			operation == CMD_MERGE)
		{
			char		relkind;
			Datum		datum;
			bool		isNull;

			relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
			if (relkind == RELKIND_RELATION ||
				relkind == RELKIND_MATVIEW ||
				relkind == RELKIND_PARTITIONED_TABLE)
			{
				/* ri_RowIdAttNo refers to a ctid attribute */
				Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo));
				datum = ExecGetJunkAttribute(slot,
											 resultRelInfo->ri_RowIdAttNo,
											 &isNull);

				/*
				 * For commands other than MERGE, any tuples having a null row
				 * identifier are errors.  For MERGE, we may need to handle
				 * them as WHEN NOT MATCHED clauses if any, so do that.
				 *
				 * Note that we use the node's toplevel resultRelInfo, not any
				 * specific partition's.
				 */
				if (isNull)
				{
					if (operation == CMD_MERGE)
					{
						EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);

						slot = ExecMerge(&context, node->resultRelInfo,
										 NULL, NULL, node->canSetTag);

						/*
						 * If we got a RETURNING result, return it to the
						 * caller.  We'll continue the work on next call.
						 */
						if (slot)
							return slot;

						continue;	/* continue with the next tuple */
					}

					elog(ERROR, "ctid is NULL");
				}

				/* Copy the TID into local storage before the slot is reused. */
				tupleid = (ItemPointer) DatumGetPointer(datum);
				tuple_ctid = *tupleid;	/* be sure we don't free ctid!! */
				tupleid = &tuple_ctid;
			}

			/*
			 * Use the wholerow attribute, when available, to reconstruct the
			 * old relation tuple.  The old tuple serves one or both of two
			 * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
			 * provides values for any unchanged columns for the NEW tuple of
			 * an UPDATE, because the subplan does not produce all the columns
			 * of the target table.
			 *
			 * Note that the wholerow attribute does not carry system columns,
			 * so foreign table triggers miss seeing those, except that we
			 * know enough here to set t_tableOid.  Quite separately from
			 * this, the FDW may fetch its own junk attrs to identify the row.
			 *
			 * Other relevant relkinds, currently limited to views, always
			 * have a wholerow attribute.
			 */
			else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
			{
				datum = ExecGetJunkAttribute(slot,
											 resultRelInfo->ri_RowIdAttNo,
											 &isNull);

				/*
				 * For commands other than MERGE, any tuples having a null row
				 * identifier are errors.  For MERGE, we may need to handle
				 * them as WHEN NOT MATCHED clauses if any, so do that.
				 *
				 * Note that we use the node's toplevel resultRelInfo, not any
				 * specific partition's.
				 */
				if (isNull)
				{
					if (operation == CMD_MERGE)
					{
						EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);

						slot = ExecMerge(&context, node->resultRelInfo,
										 NULL, NULL, node->canSetTag);

						/*
						 * If we got a RETURNING result, return it to the
						 * caller.  We'll continue the work on next call.
						 */
						if (slot)
							return slot;

						continue;	/* continue with the next tuple */
					}

					elog(ERROR, "wholerow is NULL");
				}

				oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
				oldtupdata.t_len =
					HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
				ItemPointerSetInvalid(&(oldtupdata.t_self));
				/* Historically, view triggers see invalid t_tableOid. */
				oldtupdata.t_tableOid =
					(relkind == RELKIND_VIEW) ? InvalidOid :
					RelationGetRelid(resultRelInfo->ri_RelationDesc);

				oldtuple = &oldtupdata;
			}
			else
			{
				/* Only foreign tables are allowed to omit a row-ID attr */
				Assert(relkind == RELKIND_FOREIGN_TABLE);
			}
		}

		switch (operation)
		{
			case CMD_INSERT:
				/* Initialize projection info if first time for this table */
				if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
					ExecInitInsertProjection(node, resultRelInfo);
				slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
				slot = ExecInsert(&context, resultRelInfo, slot,
								  node->canSetTag, NULL, NULL);
				break;

			case CMD_UPDATE:
				/* Initialize projection info if first time for this table */
				if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
					ExecInitUpdateProjection(node, resultRelInfo);

				/*
				 * Make the new tuple by combining plan's output tuple with
				 * the old tuple being updated.
				 */
				oldSlot = resultRelInfo->ri_oldTupleSlot;
				if (oldtuple != NULL)
				{
					/* Use the wholerow junk attr as the old tuple. */
					ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
				}
				else
				{
					/* Fetch the most recent version of old tuple. */
					Relation	relation = resultRelInfo->ri_RelationDesc;

					if (!table_tuple_fetch_row_version(relation, tupleid,
													   SnapshotAny,
													   oldSlot))
						elog(ERROR, "failed to fetch tuple being updated");
				}
				slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
											 oldSlot);

				/* Now apply the update. */
				slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
								  slot, node->canSetTag);
				break;

			case CMD_DELETE:
				slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
								  true, false, node->canSetTag, NULL, NULL, NULL);
				break;

			case CMD_MERGE:
				slot = ExecMerge(&context, resultRelInfo, tupleid, oldtuple,
								 node->canSetTag);
				break;

			default:
				elog(ERROR, "unknown operation");
				break;
		}

		/*
		 * If we got a RETURNING result, return it to caller.  We'll continue
		 * the work on next call.
		 */
		if (slot)
			return slot;
	}

	/*
	 * Insert remaining tuples for batch insert.
	 */
	if (estate->es_insert_pending_result_relations != NIL)
		ExecPendingInserts(estate);

	/*
	 * We're done, but fire AFTER STATEMENT triggers before exiting.
	 */
	fireASTriggers(node);

	node->mt_done = true;

	return NULL;
}
4134 :
4135 : /*
4136 : * ExecLookupResultRelByOid
4137 : * If the table with given OID is among the result relations to be
4138 : * updated by the given ModifyTable node, return its ResultRelInfo.
4139 : *
4140 : * If not found, return NULL if missing_ok, else raise error.
4141 : *
4142 : * If update_cache is true, then upon successful lookup, update the node's
4143 : * one-element cache. ONLY ExecModifyTable may pass true for this.
4144 : */
4145 : ResultRelInfo *
4146 12844 : ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid,
4147 : bool missing_ok, bool update_cache)
4148 : {
4149 12844 : if (node->mt_resultOidHash)
4150 : {
4151 : /* Use the pre-built hash table to locate the rel */
4152 : MTTargetRelLookup *mtlookup;
4153 :
4154 : mtlookup = (MTTargetRelLookup *)
4155 0 : hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
4156 0 : if (mtlookup)
4157 : {
4158 0 : if (update_cache)
4159 : {
4160 0 : node->mt_lastResultOid = resultoid;
4161 0 : node->mt_lastResultIndex = mtlookup->relationIndex;
4162 : }
4163 0 : return node->resultRelInfo + mtlookup->relationIndex;
4164 : }
4165 : }
4166 : else
4167 : {
4168 : /* With few target rels, just search the ResultRelInfo array */
4169 24732 : for (int ndx = 0; ndx < node->mt_nrels; ndx++)
4170 : {
4171 15342 : ResultRelInfo *rInfo = node->resultRelInfo + ndx;
4172 :
4173 15342 : if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
4174 : {
4175 3454 : if (update_cache)
4176 : {
4177 2972 : node->mt_lastResultOid = resultoid;
4178 2972 : node->mt_lastResultIndex = ndx;
4179 : }
4180 3454 : return rInfo;
4181 : }
4182 : }
4183 : }
4184 :
4185 9390 : if (!missing_ok)
4186 0 : elog(ERROR, "incorrect result relation OID %u", resultoid);
4187 9390 : return NULL;
4188 : }
4189 :
4190 : /* ----------------------------------------------------------------
4191 : * ExecInitModifyTable
4192 : * ----------------------------------------------------------------
4193 : */
4194 : ModifyTableState *
4195 118248 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
4196 : {
4197 : ModifyTableState *mtstate;
4198 118248 : Plan *subplan = outerPlan(node);
4199 118248 : CmdType operation = node->operation;
4200 118248 : int nrels = list_length(node->resultRelations);
4201 : ResultRelInfo *resultRelInfo;
4202 : List *arowmarks;
4203 : ListCell *l;
4204 : int i;
4205 : Relation rel;
4206 :
4207 : /* check for unsupported flags */
4208 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
4209 :
4210 : /*
4211 : * create state structure
4212 : */
4213 118248 : mtstate = makeNode(ModifyTableState);
4214 118248 : mtstate->ps.plan = (Plan *) node;
4215 118248 : mtstate->ps.state = estate;
4216 118248 : mtstate->ps.ExecProcNode = ExecModifyTable;
4217 :
4218 118248 : mtstate->operation = operation;
4219 118248 : mtstate->canSetTag = node->canSetTag;
4220 118248 : mtstate->mt_done = false;
4221 :
4222 118248 : mtstate->mt_nrels = nrels;
4223 118248 : mtstate->resultRelInfo = (ResultRelInfo *)
4224 118248 : palloc(nrels * sizeof(ResultRelInfo));
4225 :
4226 118248 : mtstate->mt_merge_pending_not_matched = NULL;
4227 118248 : mtstate->mt_merge_inserted = 0;
4228 118248 : mtstate->mt_merge_updated = 0;
4229 118248 : mtstate->mt_merge_deleted = 0;
4230 :
4231 : /*----------
4232 : * Resolve the target relation. This is the same as:
4233 : *
4234 : * - the relation for which we will fire FOR STATEMENT triggers,
4235 : * - the relation into whose tuple format all captured transition tuples
4236 : * must be converted, and
4237 : * - the root partitioned table used for tuple routing.
4238 : *
4239 : * If it's a partitioned or inherited table, the root partition or
4240 : * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
4241 : * given explicitly in node->rootRelation. Otherwise, the target relation
4242 : * is the sole relation in the node->resultRelations list.
4243 : *----------
4244 : */
4245 118248 : if (node->rootRelation > 0)
4246 : {
4247 2526 : mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
4248 2526 : ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
4249 : node->rootRelation);
4250 : }
4251 : else
4252 : {
4253 : Assert(list_length(node->resultRelations) == 1);
4254 115722 : mtstate->rootResultRelInfo = mtstate->resultRelInfo;
4255 115722 : ExecInitResultRelation(estate, mtstate->resultRelInfo,
4256 115722 : linitial_int(node->resultRelations));
4257 : }
4258 :
4259 : /* set up epqstate with dummy subplan data for the moment */
4260 118248 : EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
4261 : node->epqParam, node->resultRelations);
4262 118248 : mtstate->fireBSTriggers = true;
4263 :
4264 : /*
4265 : * Build state for collecting transition tuples. This requires having a
4266 : * valid trigger query context, so skip it in explain-only mode.
4267 : */
4268 118248 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
4269 117430 : ExecSetupTransitionCaptureState(mtstate, estate);
4270 :
4271 : /*
4272 : * Open all the result relations and initialize the ResultRelInfo structs.
4273 : * (But root relation was initialized above, if it's part of the array.)
4274 : * We must do this before initializing the subplan, because direct-modify
4275 : * FDWs expect their ResultRelInfos to be available.
4276 : */
4277 118248 : resultRelInfo = mtstate->resultRelInfo;
4278 118248 : i = 0;
4279 238554 : foreach(l, node->resultRelations)
4280 : {
4281 120580 : Index resultRelation = lfirst_int(l);
4282 120580 : List *mergeActions = NIL;
4283 :
4284 120580 : if (node->mergeActionLists)
4285 1610 : mergeActions = list_nth(node->mergeActionLists, i);
4286 :
4287 120580 : if (resultRelInfo != mtstate->rootResultRelInfo)
4288 : {
4289 4858 : ExecInitResultRelation(estate, resultRelInfo, resultRelation);
4290 :
4291 : /*
4292 : * For child result relations, store the root result relation
4293 : * pointer. We do so for the convenience of places that want to
4294 : * look at the query's original target relation but don't have the
4295 : * mtstate handy.
4296 : */
4297 4858 : resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
4298 : }
4299 :
4300 : /* Initialize the usesFdwDirectModify flag */
4301 120580 : resultRelInfo->ri_usesFdwDirectModify =
4302 120580 : bms_is_member(i, node->fdwDirectModifyPlans);
4303 :
4304 : /*
4305 : * Verify result relation is a valid target for the current operation
4306 : */
4307 120580 : CheckValidResultRel(resultRelInfo, operation, mergeActions);
4308 :
4309 120306 : resultRelInfo++;
4310 120306 : i++;
4311 : }
4312 :
4313 : /*
4314 : * Now we may initialize the subplan.
4315 : */
4316 117974 : outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
4317 :
4318 : /*
4319 : * Do additional per-result-relation initialization.
4320 : */
4321 238246 : for (i = 0; i < nrels; i++)
4322 : {
4323 120272 : resultRelInfo = &mtstate->resultRelInfo[i];
4324 :
4325 : /* Let FDWs init themselves for foreign-table result rels */
4326 120272 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4327 120064 : resultRelInfo->ri_FdwRoutine != NULL &&
4328 306 : resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
4329 : {
4330 306 : List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
4331 :
4332 306 : resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
4333 : resultRelInfo,
4334 : fdw_private,
4335 : i,
4336 : eflags);
4337 : }
4338 :
4339 : /*
4340 : * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
4341 : * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
4342 : * tables, the FDW might have created additional junk attr(s), but
4343 : * those are no concern of ours.
4344 : */
4345 120272 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4346 : operation == CMD_MERGE)
4347 : {
4348 : char relkind;
4349 :
4350 29246 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4351 29246 : if (relkind == RELKIND_RELATION ||
4352 634 : relkind == RELKIND_MATVIEW ||
4353 : relkind == RELKIND_PARTITIONED_TABLE)
4354 : {
4355 28648 : resultRelInfo->ri_RowIdAttNo =
4356 28648 : ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
4357 28648 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4358 0 : elog(ERROR, "could not find junk ctid column");
4359 : }
4360 598 : else if (relkind == RELKIND_FOREIGN_TABLE)
4361 : {
4362 : /*
4363 : * We don't support MERGE with foreign tables for now. (It's
4364 : * problematic because the implementation uses CTID.)
4365 : */
4366 : Assert(operation != CMD_MERGE);
4367 :
4368 : /*
4369 : * When there is a row-level trigger, there should be a
4370 : * wholerow attribute. We also require it to be present in
4371 : * UPDATE and MERGE, so we can get the values of unchanged
4372 : * columns.
4373 : */
4374 340 : resultRelInfo->ri_RowIdAttNo =
4375 340 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4376 : "wholerow");
4377 340 : if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
4378 190 : !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4379 0 : elog(ERROR, "could not find junk wholerow column");
4380 : }
4381 : else
4382 : {
4383 : /* Other valid target relkinds must provide wholerow */
4384 258 : resultRelInfo->ri_RowIdAttNo =
4385 258 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4386 : "wholerow");
4387 258 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4388 0 : elog(ERROR, "could not find junk wholerow column");
4389 : }
4390 : }
4391 : }
4392 :
4393 : /*
4394 : * If this is an inherited update/delete/merge, there will be a junk
4395 : * attribute named "tableoid" present in the subplan's targetlist. It
4396 : * will be used to identify the result relation for a given tuple to be
4397 : * updated/deleted/merged.
4398 : */
4399 117974 : mtstate->mt_resultOidAttno =
4400 117974 : ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
4401 : Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || nrels == 1);
4402 117974 : mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
4403 117974 : mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
4404 :
4405 : /* Get the root target relation */
4406 117974 : rel = mtstate->rootResultRelInfo->ri_RelationDesc;
4407 :
4408 : /*
4409 : * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
4410 : * or MERGE might need this too, but only if it actually moves tuples
4411 : * between partitions; in that case setup is done by
4412 : * ExecCrossPartitionUpdate.
4413 : */
4414 117974 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
4415 : operation == CMD_INSERT)
4416 7146 : mtstate->mt_partition_tuple_routing =
4417 7146 : ExecSetupPartitionTupleRouting(estate, rel);
4418 :
4419 : /*
4420 : * Initialize any WITH CHECK OPTION constraints if needed.
4421 : */
4422 117974 : resultRelInfo = mtstate->resultRelInfo;
4423 119308 : foreach(l, node->withCheckOptionLists)
4424 : {
4425 1334 : List *wcoList = (List *) lfirst(l);
4426 1334 : List *wcoExprs = NIL;
4427 : ListCell *ll;
4428 :
4429 3632 : foreach(ll, wcoList)
4430 : {
4431 2298 : WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
4432 2298 : ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
4433 : &mtstate->ps);
4434 :
4435 2298 : wcoExprs = lappend(wcoExprs, wcoExpr);
4436 : }
4437 :
4438 1334 : resultRelInfo->ri_WithCheckOptions = wcoList;
4439 1334 : resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
4440 1334 : resultRelInfo++;
4441 : }
4442 :
4443 : /*
4444 : * Initialize RETURNING projections if needed.
4445 : */
4446 117974 : if (node->returningLists)
4447 : {
4448 : TupleTableSlot *slot;
4449 : ExprContext *econtext;
4450 :
4451 : /*
4452 : * Initialize result tuple slot and assign its rowtype using the first
4453 : * RETURNING list. We assume the rest will look the same.
4454 : */
4455 4152 : mtstate->ps.plan->targetlist = (List *) linitial(node->returningLists);
4456 :
4457 : /* Set up a slot for the output of the RETURNING projection(s) */
4458 4152 : ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
4459 4152 : slot = mtstate->ps.ps_ResultTupleSlot;
4460 :
4461 : /* Need an econtext too */
4462 4152 : if (mtstate->ps.ps_ExprContext == NULL)
4463 4152 : ExecAssignExprContext(estate, &mtstate->ps);
4464 4152 : econtext = mtstate->ps.ps_ExprContext;
4465 :
4466 : /*
4467 : * Build a projection for each result rel.
4468 : */
4469 4152 : resultRelInfo = mtstate->resultRelInfo;
4470 8632 : foreach(l, node->returningLists)
4471 : {
4472 4480 : List *rlist = (List *) lfirst(l);
4473 :
4474 4480 : resultRelInfo->ri_returningList = rlist;
4475 4480 : resultRelInfo->ri_projectReturning =
4476 4480 : ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
4477 4480 : resultRelInfo->ri_RelationDesc->rd_att);
4478 4480 : resultRelInfo++;
4479 : }
4480 : }
4481 : else
4482 : {
4483 : /*
4484 : * We still must construct a dummy result tuple type, because InitPlan
4485 : * expects one (maybe should change that?).
4486 : */
4487 113822 : mtstate->ps.plan->targetlist = NIL;
4488 113822 : ExecInitResultTypeTL(&mtstate->ps);
4489 :
4490 113822 : mtstate->ps.ps_ExprContext = NULL;
4491 : }
4492 :
4493 : /* Set the list of arbiter indexes if needed for ON CONFLICT */
4494 117974 : resultRelInfo = mtstate->resultRelInfo;
4495 117974 : if (node->onConflictAction != ONCONFLICT_NONE)
4496 : {
4497 : /* insert may only have one relation, inheritance is not expanded */
4498 : Assert(nrels == 1);
4499 1188 : resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
4500 : }
4501 :
4502 : /*
4503 : * If needed, Initialize target list, projection and qual for ON CONFLICT
4504 : * DO UPDATE.
4505 : */
4506 117974 : if (node->onConflictAction == ONCONFLICT_UPDATE)
4507 : {
4508 900 : OnConflictSetState *onconfl = makeNode(OnConflictSetState);
4509 : ExprContext *econtext;
4510 : TupleDesc relationDesc;
4511 :
4512 : /* already exists if created by RETURNING processing above */
4513 900 : if (mtstate->ps.ps_ExprContext == NULL)
4514 632 : ExecAssignExprContext(estate, &mtstate->ps);
4515 :
4516 900 : econtext = mtstate->ps.ps_ExprContext;
4517 900 : relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
4518 :
4519 : /* create state for DO UPDATE SET operation */
4520 900 : resultRelInfo->ri_onConflict = onconfl;
4521 :
4522 : /* initialize slot for the existing tuple */
4523 900 : onconfl->oc_Existing =
4524 900 : table_slot_create(resultRelInfo->ri_RelationDesc,
4525 900 : &mtstate->ps.state->es_tupleTable);
4526 :
4527 : /*
4528 : * Create the tuple slot for the UPDATE SET projection. We want a slot
4529 : * of the table's type here, because the slot will be used to insert
4530 : * into the table, and for RETURNING processing - which may access
4531 : * system attributes.
4532 : */
4533 900 : onconfl->oc_ProjSlot =
4534 900 : table_slot_create(resultRelInfo->ri_RelationDesc,
4535 900 : &mtstate->ps.state->es_tupleTable);
4536 :
4537 : /* build UPDATE SET projection state */
4538 900 : onconfl->oc_ProjInfo =
4539 900 : ExecBuildUpdateProjection(node->onConflictSet,
4540 : true,
4541 : node->onConflictCols,
4542 : relationDesc,
4543 : econtext,
4544 : onconfl->oc_ProjSlot,
4545 : &mtstate->ps);
4546 :
4547 : /* initialize state to evaluate the WHERE clause, if any */
4548 900 : if (node->onConflictWhere)
4549 : {
4550 : ExprState *qualexpr;
4551 :
4552 176 : qualexpr = ExecInitQual((List *) node->onConflictWhere,
4553 : &mtstate->ps);
4554 176 : onconfl->oc_WhereClause = qualexpr;
4555 : }
4556 : }
4557 :
4558 : /*
4559 : * If we have any secondary relations in an UPDATE or DELETE, they need to
4560 : * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
4561 : * EvalPlanQual mechanism needs to be told about them. This also goes for
4562 : * the source relations in a MERGE. Locate the relevant ExecRowMarks.
4563 : */
4564 117974 : arowmarks = NIL;
4565 120456 : foreach(l, node->rowMarks)
4566 : {
4567 2482 : PlanRowMark *rc = lfirst_node(PlanRowMark, l);
4568 : ExecRowMark *erm;
4569 : ExecAuxRowMark *aerm;
4570 :
4571 : /* ignore "parent" rowmarks; they are irrelevant at runtime */
4572 2482 : if (rc->isParent)
4573 100 : continue;
4574 :
4575 : /* Find ExecRowMark and build ExecAuxRowMark */
4576 2382 : erm = ExecFindRowMark(estate, rc->rti, false);
4577 2382 : aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
4578 2382 : arowmarks = lappend(arowmarks, aerm);
4579 : }
4580 :
4581 : /* For a MERGE command, initialize its state */
4582 117974 : if (mtstate->operation == CMD_MERGE)
4583 1378 : ExecInitMerge(mtstate, estate);
4584 :
4585 117974 : EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
4586 :
4587 : /*
4588 : * If there are a lot of result relations, use a hash table to speed the
4589 : * lookups. If there are not a lot, a simple linear search is faster.
4590 : *
4591 : * It's not clear where the threshold is, but try 64 for starters. In a
4592 : * debugging build, use a small threshold so that we get some test
4593 : * coverage of both code paths.
4594 : */
4595 : #ifdef USE_ASSERT_CHECKING
4596 : #define MT_NRELS_HASH 4
4597 : #else
4598 : #define MT_NRELS_HASH 64
4599 : #endif
4600 117974 : if (nrels >= MT_NRELS_HASH)
4601 : {
4602 : HASHCTL hash_ctl;
4603 :
4604 0 : hash_ctl.keysize = sizeof(Oid);
4605 0 : hash_ctl.entrysize = sizeof(MTTargetRelLookup);
4606 0 : hash_ctl.hcxt = CurrentMemoryContext;
4607 0 : mtstate->mt_resultOidHash =
4608 0 : hash_create("ModifyTable target hash",
4609 : nrels, &hash_ctl,
4610 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
4611 0 : for (i = 0; i < nrels; i++)
4612 : {
4613 : Oid hashkey;
4614 : MTTargetRelLookup *mtlookup;
4615 : bool found;
4616 :
4617 0 : resultRelInfo = &mtstate->resultRelInfo[i];
4618 0 : hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
4619 : mtlookup = (MTTargetRelLookup *)
4620 0 : hash_search(mtstate->mt_resultOidHash, &hashkey,
4621 : HASH_ENTER, &found);
4622 : Assert(!found);
4623 0 : mtlookup->relationIndex = i;
4624 : }
4625 : }
4626 : else
4627 117974 : mtstate->mt_resultOidHash = NULL;
4628 :
4629 : /*
4630 : * Determine if the FDW supports batch insert and determine the batch size
4631 : * (a FDW may support batching, but it may be disabled for the
4632 : * server/table).
4633 : *
4634 : * We only do this for INSERT, so that for UPDATE/DELETE the batch size
4635 : * remains set to 0.
4636 : */
4637 117974 : if (operation == CMD_INSERT)
4638 : {
4639 : /* insert may only have one relation, inheritance is not expanded */
4640 : Assert(nrels == 1);
4641 91026 : resultRelInfo = mtstate->resultRelInfo;
4642 91026 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4643 91026 : resultRelInfo->ri_FdwRoutine != NULL &&
4644 174 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
4645 174 : resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
4646 : {
4647 174 : resultRelInfo->ri_BatchSize =
4648 174 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
4649 174 : Assert(resultRelInfo->ri_BatchSize >= 1);
4650 : }
4651 : else
4652 90852 : resultRelInfo->ri_BatchSize = 1;
4653 : }
4654 :
4655 : /*
4656 : * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
4657 : * to estate->es_auxmodifytables so that it will be run to completion by
4658 : * ExecPostprocessPlan. (It'd actually work fine to add the primary
4659 : * ModifyTable node too, but there's no need.) Note the use of lcons not
4660 : * lappend: we need later-initialized ModifyTable nodes to be shut down
4661 : * before earlier ones. This ensures that we don't throw away RETURNING
4662 : * rows that need to be seen by a later CTE subplan.
4663 : */
4664 117974 : if (!mtstate->canSetTag)
4665 912 : estate->es_auxmodifytables = lcons(mtstate,
4666 : estate->es_auxmodifytables);
4667 :
4668 117974 : return mtstate;
4669 : }
4670 :
4671 : /* ----------------------------------------------------------------
4672 : * ExecEndModifyTable
4673 : *
4674 : * Shuts down the plan.
4675 : *
4676 : * Returns nothing of interest.
4677 : * ----------------------------------------------------------------
4678 : */
4679 : void
4680 113894 : ExecEndModifyTable(ModifyTableState *node)
4681 : {
4682 : int i;
4683 :
4684 : /*
4685 : * Allow any FDWs to shut down; direct-modify relations are skipped here.
4686 : */
4687 229820 : for (i = 0; i < node->mt_nrels; i++)
4688 : {
4689 : int j;
4690 115926 : ResultRelInfo *resultRelInfo = node->resultRelInfo + i; /* i-th target relation */
4691 :
4692 115926 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4693 115734 : resultRelInfo->ri_FdwRoutine != NULL &&
4694 286 : resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
4695 286 : resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
4696 : resultRelInfo);
4697 :
4698 : /*
4699 : * Cleanup the initialized batch slots. This only matters for FDWs
4700 : * with batching, but the other cases will have ri_NumSlotsInitialized
4701 : * == 0.
4702 : */
4703 115982 : for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
4704 : {
4705 56 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
4706 56 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
4707 : }
4708 : }
4709 :
4710 : /*
4711 : * Close all the partitioned tables, leaf partitions, and their indices
4712 : * and release the slot used for tuple routing, if set.
4713 : */
4714 113894 : if (node->mt_partition_tuple_routing)
4715 : {
4716 7152 : ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
4717 :
4718 7152 : if (node->mt_root_tuple_slot)
4719 554 : ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
4720 : }
4721 :
4722 : /*
4723 : * Terminate EPQ execution if active
4724 : */
4725 113894 : EvalPlanQualEnd(&node->mt_epqstate);
4726 :
4727 : /*
4728 : * Shut down the subplan, which supplied the tuples to insert/update/delete.
4729 : */
4730 113894 : ExecEndNode(outerPlanState(node));
4731 113894 : }
4732 :
4733 : void
4734 0 : ExecReScanModifyTable(ModifyTableState *node)
4735 : {
4736 : /*
4737 : * Currently, we don't need to support rescan on ModifyTable nodes. The
4738 : * semantics of that would be a bit debatable anyway.
4739 : */
4740 0 : elog(ERROR, "ExecReScanModifyTable is not implemented"); /* elog(ERROR) does not return */
4741 : }
|