Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeModifyTable.c
4 : * routines to handle ModifyTable nodes.
5 : *
6 : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/executor/nodeModifyTable.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : /* INTERFACE ROUTINES
16 : * ExecInitModifyTable - initialize the ModifyTable node
17 : * ExecModifyTable - retrieve the next tuple from the node
18 : * ExecEndModifyTable - shut down the ModifyTable node
19 : * ExecReScanModifyTable - rescan the ModifyTable node
20 : *
21 : * NOTES
22 : * The ModifyTable node receives input from its outerPlan, which is
23 : * the data to insert for INSERT cases, the changed columns' new
24 : * values plus row-locating info for UPDATE and MERGE cases, or just the
25 : * row-locating info for DELETE cases.
26 : *
27 : * The relation to modify can be an ordinary table, a foreign table, or a
28 : * view. If it's a view, either it has sufficient INSTEAD OF triggers or
29 : * this node executes only MERGE ... DO NOTHING. If the original MERGE
30 : * targeted a view not in one of those two categories, earlier processing
31 : * already pointed the ModifyTable result relation to an underlying
32 : * relation of that other view. This node does process
33 : * ri_WithCheckOptions, which may have expressions from those other,
34 : * automatically updatable views.
35 : *
36 : * MERGE runs a join between the source relation and the target table.
37 : * If any WHEN NOT MATCHED [BY TARGET] clauses are present, then the join
38 : * is an outer join that might output tuples without a matching target
39 : * tuple. In this case, any unmatched target tuples will have NULL
40 : * row-locating info, and only INSERT can be run. But for matched target
41 : * tuples, the row-locating info is used to determine the tuple to UPDATE
42 : * or DELETE. When all clauses are WHEN MATCHED or WHEN NOT MATCHED BY
43 : * SOURCE, all tuples produced by the join will include a matching target
44 : * tuple, so all tuples contain row-locating info.
45 : *
46 : * If the query specifies RETURNING, then the ModifyTable returns a
47 : * RETURNING tuple after completing each row insert, update, or delete.
48 : * It must be called again to continue the operation. Without RETURNING,
49 : * we just loop within the node until all the work is done, then
50 : * return NULL. This avoids useless call/return overhead.
51 : */
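/*
 * Illustrative sketch of the MERGE shape described above, using hypothetical
 * table and column names: with a WHEN NOT MATCHED [BY TARGET] clause, the
 * join under ModifyTable is an outer join, so a source row with no matching
 * target row arrives with NULL row-locating info and can only INSERT.
 *
 *		MERGE INTO target t
 *		USING source s ON t.id = s.id
 *		WHEN MATCHED THEN
 *			UPDATE SET val = s.val
 *		WHEN NOT MATCHED THEN
 *			INSERT (id, val) VALUES (s.id, s.val);
 */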
52 :
53 : #include "postgres.h"
54 :
55 : #include "access/htup_details.h"
56 : #include "access/tableam.h"
57 : #include "access/xact.h"
58 : #include "commands/trigger.h"
59 : #include "executor/execPartition.h"
60 : #include "executor/executor.h"
61 : #include "executor/nodeModifyTable.h"
62 : #include "foreign/fdwapi.h"
63 : #include "miscadmin.h"
64 : #include "nodes/nodeFuncs.h"
65 : #include "optimizer/optimizer.h"
66 : #include "rewrite/rewriteHandler.h"
67 : #include "storage/lmgr.h"
68 : #include "utils/builtins.h"
69 : #include "utils/datum.h"
70 : #include "utils/rel.h"
71 : #include "utils/snapmgr.h"
72 :
73 :
74 : typedef struct MTTargetRelLookup
75 : {
76 : Oid relationOid; /* hash key, must be first */
77 : int relationIndex; /* rel's index in resultRelInfo[] array */
78 : } MTTargetRelLookup;
79 :
80 : /*
81 : * Context struct for a ModifyTable operation, containing basic execution
82 : * state and some output variables populated by ExecUpdateAct() and
83 : * ExecDeleteAct() to report the result of their actions to callers.
84 : */
85 : typedef struct ModifyTableContext
86 : {
87 : /* Operation state */
88 : ModifyTableState *mtstate;
89 : EPQState *epqstate;
90 : EState *estate;
91 :
92 : /*
93 : * Slot containing tuple obtained from ModifyTable's subplan. Used to
94 : * access "junk" columns that are not going to be stored.
95 : */
96 : TupleTableSlot *planSlot;
97 :
98 : /*
99 : * Information about the changes that were made concurrently to a tuple
100 : * being updated or deleted
101 : */
102 : TM_FailureData tmfd;
103 :
104 : /*
105 : * The tuple projected by the INSERT's RETURNING clause, when doing a
106 : * cross-partition UPDATE
107 : */
108 : TupleTableSlot *cpUpdateReturningSlot;
109 : } ModifyTableContext;
110 :
111 : /*
112 : * Context struct containing output data specific to UPDATE operations.
113 : */
114 : typedef struct UpdateContext
115 : {
116 : bool crossPartUpdate; /* was it a cross-partition update? */
117 : TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
118 :
119 : /*
120 : * Lock mode to acquire on the latest tuple version before performing
121 : * EvalPlanQual on it
122 : */
123 : LockTupleMode lockmode;
124 : } UpdateContext;
125 :
126 :
127 : static void ExecBatchInsert(ModifyTableState *mtstate,
128 : ResultRelInfo *resultRelInfo,
129 : TupleTableSlot **slots,
130 : TupleTableSlot **planSlots,
131 : int numSlots,
132 : EState *estate,
133 : bool canSetTag);
134 : static void ExecPendingInserts(EState *estate);
135 : static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
136 : ResultRelInfo *sourcePartInfo,
137 : ResultRelInfo *destPartInfo,
138 : ItemPointer tupleid,
139 : TupleTableSlot *oldslot,
140 : TupleTableSlot *newslot);
141 : static bool ExecOnConflictUpdate(ModifyTableContext *context,
142 : ResultRelInfo *resultRelInfo,
143 : ItemPointer conflictTid,
144 : TupleTableSlot *excludedSlot,
145 : bool canSetTag,
146 : TupleTableSlot **returning);
147 : static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
148 : EState *estate,
149 : PartitionTupleRouting *proute,
150 : ResultRelInfo *targetRelInfo,
151 : TupleTableSlot *slot,
152 : ResultRelInfo **partRelInfo);
153 :
154 : static TupleTableSlot *ExecMerge(ModifyTableContext *context,
155 : ResultRelInfo *resultRelInfo,
156 : ItemPointer tupleid,
157 : HeapTuple oldtuple,
158 : bool canSetTag);
159 : static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
160 : static TupleTableSlot *ExecMergeMatched(ModifyTableContext *context,
161 : ResultRelInfo *resultRelInfo,
162 : ItemPointer tupleid,
163 : HeapTuple oldtuple,
164 : bool canSetTag,
165 : bool *matched);
166 : static TupleTableSlot *ExecMergeNotMatched(ModifyTableContext *context,
167 : ResultRelInfo *resultRelInfo,
168 : bool canSetTag);
169 :
170 :
171 : /*
172 : * Verify that the tuples to be produced by INSERT match the
173 : * target relation's rowtype
174 : *
175 : * We do this to guard against stale plans. If plan invalidation is
176 : * functioning properly then we should never get a failure here, but better
177 : * safe than sorry. Note that this is called after we have obtained lock
178 : * on the target rel, so the rowtype can't change underneath us.
179 : *
180 : * The plan output is represented by its targetlist, because that makes
181 : * handling the dropped-column case easier.
182 : *
183 : * We used to use this for UPDATE as well, but now the equivalent checks
184 : * are done in ExecBuildUpdateProjection.
185 : */
186 : static void
187 88718 : ExecCheckPlanOutput(Relation resultRel, List *targetList)
188 : {
189 88718 : TupleDesc resultDesc = RelationGetDescr(resultRel);
190 88718 : int attno = 0;
191 : ListCell *lc;
192 :
193 271022 : foreach(lc, targetList)
194 : {
195 182304 : TargetEntry *tle = (TargetEntry *) lfirst(lc);
196 : Form_pg_attribute attr;
197 :
198 : Assert(!tle->resjunk); /* caller removed junk items already */
199 :
200 182304 : if (attno >= resultDesc->natts)
201 0 : ereport(ERROR,
202 : (errcode(ERRCODE_DATATYPE_MISMATCH),
203 : errmsg("table row type and query-specified row type do not match"),
204 : errdetail("Query has too many columns.")));
205 182304 : attr = TupleDescAttr(resultDesc, attno);
206 182304 : attno++;
207 :
208 182304 : if (!attr->attisdropped)
209 : {
210 : /* Normal case: demand type match */
211 181694 : if (exprType((Node *) tle->expr) != attr->atttypid)
212 0 : ereport(ERROR,
213 : (errcode(ERRCODE_DATATYPE_MISMATCH),
214 : errmsg("table row type and query-specified row type do not match"),
215 : errdetail("Table has type %s at ordinal position %d, but query expects %s.",
216 : format_type_be(attr->atttypid),
217 : attno,
218 : format_type_be(exprType((Node *) tle->expr)))));
219 : }
220 : else
221 : {
222 : /*
223 : * For a dropped column, we can't check atttypid (it's likely 0).
224 : * In any case the planner has most likely inserted an INT4 null.
225 : * What we insist on is just *some* NULL constant.
226 : */
227 610 : if (!IsA(tle->expr, Const) ||
228 610 : !((Const *) tle->expr)->constisnull)
229 0 : ereport(ERROR,
230 : (errcode(ERRCODE_DATATYPE_MISMATCH),
231 : errmsg("table row type and query-specified row type do not match"),
232 : errdetail("Query provides a value for a dropped column at ordinal position %d.",
233 : attno)));
234 : }
235 : }
236 88718 : if (attno != resultDesc->natts)
237 0 : ereport(ERROR,
238 : (errcode(ERRCODE_DATATYPE_MISMATCH),
239 : errmsg("table row type and query-specified row type do not match"),
240 : errdetail("Query has too few columns.")));
241 88718 : }
242 :
243 : /*
244 : * ExecProcessReturning --- evaluate a RETURNING list
245 : *
246 : * resultRelInfo: current result rel
247 : * tupleSlot: slot holding tuple actually inserted/updated/deleted
248 : * planSlot: slot holding tuple returned by top subplan node
249 : *
250 : * Note: If tupleSlot is NULL, the FDW should have already provided econtext's
251 : * scan tuple.
252 : *
253 : * Returns a slot holding the result tuple
254 : */
255 : static TupleTableSlot *
256 7440 : ExecProcessReturning(ResultRelInfo *resultRelInfo,
257 : TupleTableSlot *tupleSlot,
258 : TupleTableSlot *planSlot)
259 : {
260 7440 : ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
261 7440 : ExprContext *econtext = projectReturning->pi_exprContext;
262 :
263 : /* Make tuple and any needed join variables available to ExecProject */
264 7440 : if (tupleSlot)
265 6746 : econtext->ecxt_scantuple = tupleSlot;
266 7440 : econtext->ecxt_outertuple = planSlot;
267 :
268 : /*
269 : * RETURNING expressions might reference the tableoid column, so
270 : * reinitialize tts_tableOid before evaluating them.
271 : */
272 7440 : econtext->ecxt_scantuple->tts_tableOid =
273 7440 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
274 :
275 : /* Compute the RETURNING expressions */
276 7440 : return ExecProject(projectReturning);
277 : }
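/*
 * For example (hypothetical table t), in
 *
 *		INSERT INTO t (a) VALUES (1) RETURNING a, tableoid::regclass;
 *
 * tupleSlot holds the row as actually stored while planSlot holds the
 * subplan's output row; the projection built from the RETURNING list may
 * reference either, which is why tts_tableOid is (re)set just above.
 */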
278 :
279 : /*
280 : * ExecCheckTupleVisible -- verify tuple is visible
281 : *
282 : * It would not be consistent with guarantees of the higher isolation levels to
283 : * proceed with avoiding insertion (taking speculative insertion's alternative
284 : * path) on the basis of another tuple that is not visible to the MVCC snapshot.
285 : * Check for the need to raise a serialization failure, and do so as necessary.
286 : */
287 : static void
288 5240 : ExecCheckTupleVisible(EState *estate,
289 : Relation rel,
290 : TupleTableSlot *slot)
291 : {
292 5240 : if (!IsolationUsesXactSnapshot())
293 5176 : return;
294 :
295 64 : if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
296 : {
297 : Datum xminDatum;
298 : TransactionId xmin;
299 : bool isnull;
300 :
301 40 : xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
302 : Assert(!isnull);
303 40 : xmin = DatumGetTransactionId(xminDatum);
304 :
305 : /*
306 : * We should not raise a serialization failure if the conflict is
307 : * against a tuple inserted by our own transaction, even if it's not
308 : * visible to our snapshot. (This would happen, for example, if
309 : * conflicting keys are proposed for insertion in a single command.)
310 : */
311 40 : if (!TransactionIdIsCurrentTransactionId(xmin))
312 20 : ereport(ERROR,
313 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
314 : errmsg("could not serialize access due to concurrent update")));
315 : }
316 : }
317 :
318 : /*
319 : * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
320 : */
321 : static void
322 164 : ExecCheckTIDVisible(EState *estate,
323 : ResultRelInfo *relinfo,
324 : ItemPointer tid,
325 : TupleTableSlot *tempSlot)
326 : {
327 164 : Relation rel = relinfo->ri_RelationDesc;
328 :
329 : /* Redundantly check isolation level */
330 164 : if (!IsolationUsesXactSnapshot())
331 100 : return;
332 :
333 64 : if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
334 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
335 64 : ExecCheckTupleVisible(estate, rel, tempSlot);
336 44 : ExecClearTuple(tempSlot);
337 : }
338 :
339 : /*
340 : * Initialize to compute stored generated columns for a tuple
341 : *
342 : * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI
343 : * or ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
344 : * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
345 : *
346 : * Note: usually, a given query would need only one of ri_GeneratedExprsI and
347 : * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
348 : * cross-partition UPDATEs, since a partition might be the target of both
349 : * UPDATE and INSERT actions.
350 : */
351 : void
352 58120 : ExecInitStoredGenerated(ResultRelInfo *resultRelInfo,
353 : EState *estate,
354 : CmdType cmdtype)
355 : {
356 58120 : Relation rel = resultRelInfo->ri_RelationDesc;
357 58120 : TupleDesc tupdesc = RelationGetDescr(rel);
358 58120 : int natts = tupdesc->natts;
359 : ExprState **ri_GeneratedExprs;
360 : int ri_NumGeneratedNeeded;
361 : Bitmapset *updatedCols;
362 : MemoryContext oldContext;
363 :
364 : /* Nothing to do if no generated columns */
365 58120 : if (!(tupdesc->constr && tupdesc->constr->has_generated_stored))
366 57178 : return;
367 :
368 : /*
369 : * In an UPDATE, we can skip computing any generated columns that do not
370 : * depend on any UPDATE target column. But if there is a BEFORE ROW
371 : * UPDATE trigger, we cannot skip because the trigger might change more
372 : * columns.
373 : */
374 942 : if (cmdtype == CMD_UPDATE &&
375 230 : !(rel->trigdesc && rel->trigdesc->trig_update_before_row))
376 204 : updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
377 : else
378 738 : updatedCols = NULL;
379 :
380 : /*
381 : * Make sure these data structures are built in the per-query memory
382 : * context so they'll survive throughout the query.
383 : */
384 942 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
385 :
386 942 : ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
387 942 : ri_NumGeneratedNeeded = 0;
388 :
389 3642 : for (int i = 0; i < natts; i++)
390 : {
391 2700 : if (TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED)
392 : {
393 : Expr *expr;
394 :
395 : /* Fetch the GENERATED AS expression tree */
396 962 : expr = (Expr *) build_column_default(rel, i + 1);
397 962 : if (expr == NULL)
398 0 : elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
399 : i + 1, RelationGetRelationName(rel));
400 :
401 : /*
402 : * If it's an update with a known set of update target columns,
403 : * see if we can skip the computation.
404 : */
405 962 : if (updatedCols)
406 : {
407 210 : Bitmapset *attrs_used = NULL;
408 :
409 210 : pull_varattnos((Node *) expr, 1, &attrs_used);
410 :
411 210 : if (!bms_overlap(updatedCols, attrs_used))
412 24 : continue; /* need not update this column */
413 : }
414 :
415 : /* No luck, so prepare the expression for execution */
416 938 : ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
417 938 : ri_NumGeneratedNeeded++;
418 :
419 : /* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
420 938 : if (cmdtype == CMD_UPDATE)
421 212 : resultRelInfo->ri_extraUpdatedCols =
422 212 : bms_add_member(resultRelInfo->ri_extraUpdatedCols,
423 : i + 1 - FirstLowInvalidHeapAttributeNumber);
424 : }
425 : }
426 :
427 : /* Save in appropriate set of fields */
428 942 : if (cmdtype == CMD_UPDATE)
429 : {
430 : /* Don't call twice */
431 : Assert(resultRelInfo->ri_GeneratedExprsU == NULL);
432 :
433 230 : resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
434 230 : resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;
435 : }
436 : else
437 : {
438 : /* Don't call twice */
439 : Assert(resultRelInfo->ri_GeneratedExprsI == NULL);
440 :
441 712 : resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
442 712 : resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
443 : }
444 :
445 942 : MemoryContextSwitchTo(oldContext);
446 : }
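/*
 * A sketch of the skip optimization above, under hypothetical DDL:
 *
 *		CREATE TABLE t (a int, b int, c int,
 *						ab int GENERATED ALWAYS AS (a + b) STORED);
 *
 * For "UPDATE t SET c = ..." (with no BEFORE ROW UPDATE trigger), updatedCols
 * does not overlap the attrs pulled from the generation expression, so ab
 * need not be recomputed; "UPDATE t SET a = ..." must recompute it.
 */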
447 :
448 : /*
449 : * Compute stored generated columns for a tuple
450 : */
451 : void
452 1240 : ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
453 : EState *estate, TupleTableSlot *slot,
454 : CmdType cmdtype)
455 : {
456 1240 : Relation rel = resultRelInfo->ri_RelationDesc;
457 1240 : TupleDesc tupdesc = RelationGetDescr(rel);
458 1240 : int natts = tupdesc->natts;
459 1240 : ExprContext *econtext = GetPerTupleExprContext(estate);
460 : ExprState **ri_GeneratedExprs;
461 : MemoryContext oldContext;
462 : Datum *values;
463 : bool *nulls;
464 :
465 : /* We should not be called unless this is true */
466 : Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);
467 :
468 : /*
469 : * Initialize the expressions if we didn't already, and check whether we
470 : * can exit early because nothing needs to be computed.
471 : */
472 1240 : if (cmdtype == CMD_UPDATE)
473 : {
474 266 : if (resultRelInfo->ri_GeneratedExprsU == NULL)
475 204 : ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
476 266 : if (resultRelInfo->ri_NumGeneratedNeededU == 0)
477 18 : return;
478 248 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
479 : }
480 : else
481 : {
482 974 : if (resultRelInfo->ri_GeneratedExprsI == NULL)
483 712 : ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
484 : /* Early exit is impossible given the prior Assert */
485 : Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
486 974 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
487 : }
488 :
489 1222 : oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
490 :
491 1222 : values = palloc(sizeof(*values) * natts);
492 1222 : nulls = palloc(sizeof(*nulls) * natts);
493 :
494 1222 : slot_getallattrs(slot);
495 1222 : memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);
496 :
497 4606 : for (int i = 0; i < natts; i++)
498 : {
499 3396 : Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
500 :
501 3396 : if (ri_GeneratedExprs[i])
502 : {
503 : Datum val;
504 : bool isnull;
505 :
506 : Assert(attr->attgenerated == ATTRIBUTE_GENERATED_STORED);
507 :
508 1236 : econtext->ecxt_scantuple = slot;
509 :
510 1236 : val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);
511 :
512 : /*
513 : * We must make a copy of val as we have no guarantees about where
514 : * memory for a pass-by-reference Datum is located.
515 : */
516 1224 : if (!isnull)
517 1182 : val = datumCopy(val, attr->attbyval, attr->attlen);
518 :
519 1224 : values[i] = val;
520 1224 : nulls[i] = isnull;
521 : }
522 : else
523 : {
524 2160 : if (!nulls[i])
525 2118 : values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
526 : }
527 : }
528 :
529 1210 : ExecClearTuple(slot);
530 1210 : memcpy(slot->tts_values, values, sizeof(*values) * natts);
531 1210 : memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
532 1210 : ExecStoreVirtualTuple(slot);
533 1210 : ExecMaterializeSlot(slot);
534 :
535 1210 : MemoryContextSwitchTo(oldContext);
536 : }
537 :
538 : /*
539 : * ExecInitInsertProjection
540 : * Do one-time initialization of projection data for INSERT tuples.
541 : *
542 : * INSERT queries may need a projection to filter out junk attrs in the tlist.
543 : *
544 : * This is also a convenient place to verify that the
545 : * output of an INSERT matches the target table.
546 : */
547 : static void
548 87754 : ExecInitInsertProjection(ModifyTableState *mtstate,
549 : ResultRelInfo *resultRelInfo)
550 : {
551 87754 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
552 87754 : Plan *subplan = outerPlan(node);
553 87754 : EState *estate = mtstate->ps.state;
554 87754 : List *insertTargetList = NIL;
555 87754 : bool need_projection = false;
556 : ListCell *l;
557 :
558 : /* Extract non-junk columns of the subplan's result tlist. */
559 267612 : foreach(l, subplan->targetlist)
560 : {
561 179858 : TargetEntry *tle = (TargetEntry *) lfirst(l);
562 :
563 179858 : if (!tle->resjunk)
564 179858 : insertTargetList = lappend(insertTargetList, tle);
565 : else
566 0 : need_projection = true;
567 : }
568 :
569 : /*
570 : * The junk-free list must produce a tuple suitable for the result
571 : * relation.
572 : */
573 87754 : ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);
574 :
575 : /* We'll need a slot matching the table's format. */
576 87754 : resultRelInfo->ri_newTupleSlot =
577 87754 : table_slot_create(resultRelInfo->ri_RelationDesc,
578 : &estate->es_tupleTable);
579 :
580 : /* Build ProjectionInfo if needed (it probably isn't). */
581 87754 : if (need_projection)
582 : {
583 0 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
584 :
585 : /* need an expression context to do the projection */
586 0 : if (mtstate->ps.ps_ExprContext == NULL)
587 0 : ExecAssignExprContext(estate, &mtstate->ps);
588 :
589 0 : resultRelInfo->ri_projectNew =
590 0 : ExecBuildProjectionInfo(insertTargetList,
591 : mtstate->ps.ps_ExprContext,
592 : resultRelInfo->ri_newTupleSlot,
593 : &mtstate->ps,
594 : relDesc);
595 : }
596 :
597 87754 : resultRelInfo->ri_projectNewInfoValid = true;
598 87754 : }
599 :
600 : /*
601 : * ExecInitUpdateProjection
602 : * Do one-time initialization of projection data for UPDATE tuples.
603 : *
604 : * UPDATE always needs a projection, because (1) there are always some junk
605 : * attrs, and (2) we may need to merge values of not-updated columns from
606 : * the old tuple into the final tuple. In UPDATE, the tuple arriving from
607 : * the subplan contains only new values for the changed columns, plus row
608 : * identity info in the junk attrs.
609 : *
610 : * This is "one-time" for any given result rel, but we might touch more than
611 : * one result rel in the course of an inherited UPDATE, and each one needs
612 : * its own projection due to possible column order variation.
613 : *
614 : * This is also a convenient place to verify that the output of an UPDATE
615 : * matches the target table (ExecBuildUpdateProjection does that).
616 : */
617 : static void
618 11746 : ExecInitUpdateProjection(ModifyTableState *mtstate,
619 : ResultRelInfo *resultRelInfo)
620 : {
621 11746 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
622 11746 : Plan *subplan = outerPlan(node);
623 11746 : EState *estate = mtstate->ps.state;
624 11746 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
625 : int whichrel;
626 : List *updateColnos;
627 :
628 : /*
629 : * Usually, mt_lastResultIndex matches the target rel. If it happens not
630 : * to, we can get the index the hard way with an integer division.
631 : */
632 11746 : whichrel = mtstate->mt_lastResultIndex;
633 11746 : if (resultRelInfo != mtstate->resultRelInfo + whichrel)
634 : {
635 0 : whichrel = resultRelInfo - mtstate->resultRelInfo;
636 : Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
637 : }
638 :
639 11746 : updateColnos = (List *) list_nth(node->updateColnosLists, whichrel);
640 :
641 : /*
642 : * For UPDATE, we use the old tuple to fill up missing values in the tuple
643 : * produced by the subplan to get the new tuple. We need two slots, both
644 : * matching the table's desired format.
645 : */
646 11746 : resultRelInfo->ri_oldTupleSlot =
647 11746 : table_slot_create(resultRelInfo->ri_RelationDesc,
648 : &estate->es_tupleTable);
649 11746 : resultRelInfo->ri_newTupleSlot =
650 11746 : table_slot_create(resultRelInfo->ri_RelationDesc,
651 : &estate->es_tupleTable);
652 :
653 : /* need an expression context to do the projection */
654 11746 : if (mtstate->ps.ps_ExprContext == NULL)
655 10550 : ExecAssignExprContext(estate, &mtstate->ps);
656 :
657 11746 : resultRelInfo->ri_projectNew =
658 11746 : ExecBuildUpdateProjection(subplan->targetlist,
659 : false, /* subplan did the evaluation */
660 : updateColnos,
661 : relDesc,
662 : mtstate->ps.ps_ExprContext,
663 : resultRelInfo->ri_newTupleSlot,
664 : &mtstate->ps);
665 :
666 11746 : resultRelInfo->ri_projectNewInfoValid = true;
667 11746 : }
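/*
 * For instance (hypothetical inheritance tree), if child1 and child2 both
 * inherit from parent but declare their columns in different orders, an
 * inherited "UPDATE parent SET ..." reaches this function once per result
 * rel, because each rel's attribute numbering requires its own projection.
 */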
668 :
669 : /*
670 : * ExecGetInsertNewTuple
671 : * This prepares a "new" tuple ready to be inserted into given result
672 : * relation, by removing any junk columns of the plan's output tuple
673 : * and (if necessary) coercing the tuple to the right tuple format.
674 : */
675 : static TupleTableSlot *
676 11207412 : ExecGetInsertNewTuple(ResultRelInfo *relinfo,
677 : TupleTableSlot *planSlot)
678 : {
679 11207412 : ProjectionInfo *newProj = relinfo->ri_projectNew;
680 : ExprContext *econtext;
681 :
682 : /*
683 : * If there's no projection to be done, just make sure the slot is of the
684 : * right type for the target rel. If the planSlot is the right type we
685 : * can use it as-is, else copy the data into ri_newTupleSlot.
686 : */
687 11207412 : if (newProj == NULL)
688 : {
689 11207412 : if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
690 : {
691 10437066 : ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
692 10437066 : return relinfo->ri_newTupleSlot;
693 : }
694 : else
695 770346 : return planSlot;
696 : }
697 :
698 : /*
699 : * Else project; since the projection output slot is ri_newTupleSlot, this
700 : * will also fix any slot-type problem.
701 : *
702 : * Note: currently, this is dead code, because INSERT cases don't receive
703 : * any junk columns so there's never a projection to be done.
704 : */
705 0 : econtext = newProj->pi_exprContext;
706 0 : econtext->ecxt_outertuple = planSlot;
707 0 : return ExecProject(newProj);
708 : }
709 :
710 : /*
711 : * ExecGetUpdateNewTuple
712 : * This prepares a "new" tuple by combining an UPDATE subplan's output
713 : * tuple (which contains values of changed columns) with unchanged
714 : * columns taken from the old tuple.
715 : *
716 : * The subplan tuple might also contain junk columns, which are ignored.
717 : * Note that the projection also ensures we have a slot of the right type.
718 : */
719 : TupleTableSlot *
720 306816 : ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
721 : TupleTableSlot *planSlot,
722 : TupleTableSlot *oldSlot)
723 : {
724 306816 : ProjectionInfo *newProj = relinfo->ri_projectNew;
725 : ExprContext *econtext;
726 :
727 : /* Use a few extra Asserts to protect against outside callers */
728 : Assert(relinfo->ri_projectNewInfoValid);
729 : Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
730 : Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));
731 :
732 306816 : econtext = newProj->pi_exprContext;
733 306816 : econtext->ecxt_outertuple = planSlot;
734 306816 : econtext->ecxt_scantuple = oldSlot;
735 306816 : return ExecProject(newProj);
736 : }
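/*
 * Example (hypothetical table t(a, b, c)): for "UPDATE t SET b = b + 1",
 * planSlot carries only the new value of b plus junk row-identity columns;
 * the projection emits b from planSlot and fills a and c from oldSlot,
 * yielding the complete new tuple in ri_newTupleSlot.
 */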
737 :
738 : /* ----------------------------------------------------------------
739 : * ExecInsert
740 : *
741 : * For INSERT, we have to insert the tuple into the target relation
742 : * (or partition thereof) and insert appropriate tuples into the index
743 : * relations.
744 : *
745 : * slot contains the new tuple value to be stored.
746 : *
747 : * Returns RETURNING result if any, otherwise NULL.
748 : * *inserted_tuple is the tuple that's effectively inserted;
749 : * *insert_destrel is the relation where it was inserted.
750 : * These are only set on success.
751 : *
752 : * This may change the currently active tuple conversion map in
753 : * mtstate->mt_transition_capture, so the callers must take care to
754 : * save the previous value to avoid losing track of it.
755 : * ----------------------------------------------------------------
756 : */
757 : static TupleTableSlot *
758 11210046 : ExecInsert(ModifyTableContext *context,
759 : ResultRelInfo *resultRelInfo,
760 : TupleTableSlot *slot,
761 : bool canSetTag,
762 : TupleTableSlot **inserted_tuple,
763 : ResultRelInfo **insert_destrel)
764 : {
765 11210046 : ModifyTableState *mtstate = context->mtstate;
766 11210046 : EState *estate = context->estate;
767 : Relation resultRelationDesc;
768 11210046 : List *recheckIndexes = NIL;
769 11210046 : TupleTableSlot *planSlot = context->planSlot;
770 11210046 : TupleTableSlot *result = NULL;
771 : TransitionCaptureState *ar_insert_trig_tcs;
772 11210046 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
773 11210046 : OnConflictAction onconflict = node->onConflictAction;
774 11210046 : PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
775 : MemoryContext oldContext;
776 :
777 : /*
778 : * If the input result relation is a partitioned table, find the leaf
779 : * partition to insert the tuple into.
780 : */
781 11210046 : if (proute)
782 : {
783 : ResultRelInfo *partRelInfo;
784 :
785 721468 : slot = ExecPrepareTupleRouting(mtstate, estate, proute,
786 : resultRelInfo, slot,
787 : &partRelInfo);
788 721264 : resultRelInfo = partRelInfo;
789 : }
790 :
791 11209842 : ExecMaterializeSlot(slot);
792 :
793 11209842 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
794 :
795 : /*
796 : * Open the table's indexes, if we have not done so already, so that we
797 : * can add new index entries for the inserted tuple.
798 : */
799 11209842 : if (resultRelationDesc->rd_rel->relhasindex &&
800 2816120 : resultRelInfo->ri_IndexRelationDescs == NULL)
801 28838 : ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);
802 :
803 : /*
804 : * BEFORE ROW INSERT Triggers.
805 : *
806 : * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
807 : * INSERT ... ON CONFLICT statement. We cannot check for constraint
808 : * violations before firing these triggers, because they can change the
809 : * values to insert. Also, they can run arbitrary user-defined code with
810 : * side-effects that we can't cancel by just not inserting the tuple.
811 : */
812 11209842 : if (resultRelInfo->ri_TrigDesc &&
813 74586 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)
814 : {
815 : /* Flush any pending inserts, so rows are visible to the triggers */
816 2120 : if (estate->es_insert_pending_result_relations != NIL)
817 6 : ExecPendingInserts(estate);
818 :
819 2120 : if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
820 200 : return NULL; /* "do nothing" */
821 : }
822 :
823 : /* INSTEAD OF ROW INSERT Triggers */
824 11209526 : if (resultRelInfo->ri_TrigDesc &&
825 74270 : resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
826 : {
827 168 : if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
828 6 : return NULL; /* "do nothing" */
829 : }
830 11209358 : else if (resultRelInfo->ri_FdwRoutine)
831 : {
832 : /*
833 : * GENERATED expressions might reference the tableoid column, so
834 : * (re-)initialize tts_tableOid before evaluating them.
835 : */
836 2014 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
837 :
838 : /*
839 : * Compute stored generated columns
840 : */
841 2014 : if (resultRelationDesc->rd_att->constr &&
842 366 : resultRelationDesc->rd_att->constr->has_generated_stored)
843 8 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
844 : CMD_INSERT);
845 :
846 : /*
847 : * If the FDW supports batching, and batching is requested, accumulate
848 : * rows and insert them in batches. Otherwise use the per-row inserts.
849 : */
850 2014 : if (resultRelInfo->ri_BatchSize > 1)
851 : {
852 288 : bool flushed = false;
853 :
854 : /*
855 : * When we've reached the desired batch size, perform the
856 : * insertion.
857 : */
858 288 : if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
859 : {
860 20 : ExecBatchInsert(mtstate, resultRelInfo,
861 : resultRelInfo->ri_Slots,
862 : resultRelInfo->ri_PlanSlots,
863 : resultRelInfo->ri_NumSlots,
864 : estate, canSetTag);
865 20 : flushed = true;
866 : }
867 :
868 288 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
869 :
870 288 : if (resultRelInfo->ri_Slots == NULL)
871 : {
872 56 : resultRelInfo->ri_Slots = palloc(sizeof(TupleTableSlot *) *
873 28 : resultRelInfo->ri_BatchSize);
874 28 : resultRelInfo->ri_PlanSlots = palloc(sizeof(TupleTableSlot *) *
875 28 : resultRelInfo->ri_BatchSize);
876 : }
877 :
878 : /*
879 : * Initialize the batch slots. We don't know how many slots will
880 : * be needed, so we initialize them as the batch grows, and we
881 : * keep them across batches. To mitigate an inefficiency in how
882 : * resource owner handles objects with many references (as with
883 : * many slots all referencing the same tuple descriptor) we copy
884 : * the appropriate tuple descriptor for each slot.
885 : */
886 288 : if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
887 : {
888 142 : TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
889 : TupleDesc plan_tdesc =
890 142 : CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
891 :
892 284 : resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
893 142 : MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
894 :
895 284 : resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
896 142 : MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
897 :
898 : /* remember how many batch slots we initialized */
899 142 : resultRelInfo->ri_NumSlotsInitialized++;
900 : }
901 :
902 288 : ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
903 : slot);
904 :
905 288 : ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
906 : planSlot);
907 :
908 : /*
909 : * If these are the first tuples stored in the buffers, add the
910 : * target rel and the mtstate to the
911 : * es_insert_pending_result_relations and
912 : * es_insert_pending_modifytables lists respectively, except in
913 : * the case where flushing was done above, in which case they
914 : * would already have been added to the lists, so no need to do
915 : * this.
916 : */
917 288 : if (resultRelInfo->ri_NumSlots == 0 && !flushed)
918 : {
919 : Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
920 : resultRelInfo));
921 36 : estate->es_insert_pending_result_relations =
922 36 : lappend(estate->es_insert_pending_result_relations,
923 : resultRelInfo);
924 36 : estate->es_insert_pending_modifytables =
925 36 : lappend(estate->es_insert_pending_modifytables, mtstate);
926 : }
927 : Assert(list_member_ptr(estate->es_insert_pending_result_relations,
928 : resultRelInfo));
929 :
930 288 : resultRelInfo->ri_NumSlots++;
931 :
932 288 : MemoryContextSwitchTo(oldContext);
933 :
934 288 : return NULL;
935 : }
936 :
937 : /*
938 : * insert into foreign table: let the FDW do it
939 : */
940 1726 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
941 : resultRelInfo,
942 : slot,
943 : planSlot);
944 :
945 1720 : if (slot == NULL) /* "do nothing" */
946 4 : return NULL;
947 :
948 : /*
949 : * AFTER ROW Triggers or RETURNING expressions might reference the
950 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
951 : * them. (This covers the case where the FDW replaced the slot.)
952 : */
953 1716 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
954 : }
955 : else
956 : {
957 : WCOKind wco_kind;
958 :
959 : /*
960 : * Constraints and GENERATED expressions might reference the tableoid
961 : * column, so (re-)initialize tts_tableOid before evaluating them.
962 : */
963 11207344 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
964 :
965 : /*
966 : * Compute stored generated columns
967 : */
968 11207344 : if (resultRelationDesc->rd_att->constr &&
969 3035634 : resultRelationDesc->rd_att->constr->has_generated_stored)
970 922 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
971 : CMD_INSERT);
972 :
973 : /*
974 : * Check any RLS WITH CHECK policies.
975 : *
976 : * Normally we should check INSERT policies. But if the insert is the
977 : * result of a partition key update that moved the tuple to a new
978 : * partition, we should instead check UPDATE policies, because we are
979 : * executing policies defined on the target table, and not those
980 : * defined on the child partitions.
981 : *
982 : * If we're running MERGE, we refer to the action that we're executing
983 : * to know if we're doing an INSERT or UPDATE to a partition table.
984 : */
985 11207332 : if (mtstate->operation == CMD_UPDATE)
986 686 : wco_kind = WCO_RLS_UPDATE_CHECK;
987 11206646 : else if (mtstate->operation == CMD_MERGE)
988 1708 : wco_kind = (mtstate->mt_merge_action->mas_action->commandType == CMD_UPDATE) ?
989 1708 : WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
990 : else
991 11204938 : wco_kind = WCO_RLS_INSERT_CHECK;
992 :
993 : /*
994 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
995 : * we are looking for at this point.
996 : */
997 11207332 : if (resultRelInfo->ri_WithCheckOptions != NIL)
998 552 : ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
999 :
1000 : /*
1001 : * Check the constraints of the tuple.
1002 : */
1003 11207158 : if (resultRelationDesc->rd_att->constr)
1004 3035538 : ExecConstraints(resultRelInfo, slot, estate);
1005 :
1006 : /*
1007 : * Also check the tuple against the partition constraint, if there is
1008 : * one; except that if we got here via tuple-routing, we don't need to
1009 : * if there's no BR trigger defined on the partition.
1010 : */
1011 11206524 : if (resultRelationDesc->rd_rel->relispartition &&
1012 725498 : (resultRelInfo->ri_RootResultRelInfo == NULL ||
1013 720686 : (resultRelInfo->ri_TrigDesc &&
1014 1328 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
1015 5008 : ExecPartitionCheck(resultRelInfo, slot, estate, true);
1016 :
1017 11206356 : if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
1018 4010 : {
1019 : /* Perform a speculative insertion. */
1020 : uint32 specToken;
1021 : ItemPointerData conflictTid;
1022 : ItemPointerData invalidItemPtr;
1023 : bool specConflict;
1024 : List *arbiterIndexes;
1025 :
1026 9386 : ItemPointerSetInvalid(&invalidItemPtr);
1027 9386 : arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
1028 :
1029 : /*
1030 : * Do a non-conclusive check for conflicts first.
1031 : *
1032 : * We're not holding any locks yet, so this doesn't guarantee that
1033 : * the later insert won't conflict. But it avoids leaving behind
1034 : * a lot of canceled speculative insertions, if you run a lot of
1035 : * INSERT ON CONFLICT statements that do conflict.
1036 : *
1037 : * We loop back here if we find a conflict below, either during
1038 : * the pre-check, or when we re-check after inserting the tuple
1039 : * speculatively. Better allow interrupts in case some bug makes
1040 : * this an infinite loop.
1041 : */
1042 9396 : vlock:
1043 9396 : CHECK_FOR_INTERRUPTS();
1044 9396 : specConflict = false;
1045 9396 : if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
1046 : &conflictTid, &invalidItemPtr,
1047 : arbiterIndexes))
1048 : {
1049 : /* committed conflict tuple found */
1050 5364 : if (onconflict == ONCONFLICT_UPDATE)
1051 : {
1052 : /*
1053 : * In case of ON CONFLICT DO UPDATE, execute the UPDATE
1054 : * part. Be prepared to retry if the UPDATE fails because
1055 : * of another concurrent UPDATE/DELETE to the conflict
1056 : * tuple.
1057 : */
1058 5200 : TupleTableSlot *returning = NULL;
1059 :
1060 5200 : if (ExecOnConflictUpdate(context, resultRelInfo,
1061 : &conflictTid, slot, canSetTag,
1062 : &returning))
1063 : {
1064 5122 : InstrCountTuples2(&mtstate->ps, 1);
1065 5122 : return returning;
1066 : }
1067 : else
1068 0 : goto vlock;
1069 : }
1070 : else
1071 : {
1072 : /*
1073 : * In case of ON CONFLICT DO NOTHING, do nothing. However,
1074 : * verify that the tuple is visible to the executor's MVCC
1075 : * snapshot at higher isolation levels.
1076 : *
1077 : * Using ExecGetReturningSlot() to store the tuple for the
1078 : * recheck isn't that pretty, but we can't trivially use
1079 : * the input slot, because it might not be of a compatible
1080 : * type. As there's no conflicting usage of
1081 : * ExecGetReturningSlot() in the DO NOTHING case...
1082 : */
1083 : Assert(onconflict == ONCONFLICT_NOTHING);
1084 164 : ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
1085 : ExecGetReturningSlot(estate, resultRelInfo));
1086 144 : InstrCountTuples2(&mtstate->ps, 1);
1087 144 : return NULL;
1088 : }
1089 : }
1090 :
1091 : /*
1092 : * Before we start insertion proper, acquire our "speculative
1093 : * insertion lock". Others can use that to wait for us to decide
1094 : * if we're going to go ahead with the insertion, instead of
1095 : * waiting for the whole transaction to complete.
1096 : */
1097 4026 : specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
1098 :
1099 : /* insert the tuple, with the speculative token */
1100 4026 : table_tuple_insert_speculative(resultRelationDesc, slot,
1101 : estate->es_output_cid,
1102 : 0,
1103 : NULL,
1104 : specToken);
1105 :
1106 : /* insert index entries for tuple */
1107 4026 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1108 : slot, estate, false, true,
1109 : &specConflict,
1110 : arbiterIndexes,
1111 : false);
1112 :
1113 : /* adjust the tuple's state accordingly */
1114 4020 : table_tuple_complete_speculative(resultRelationDesc, slot,
1115 4020 : specToken, !specConflict);
1116 :
1117 : /*
1118 : * Wake up anyone waiting for our decision. They will re-check
1119 : * the tuple, see that it's no longer speculative, and wait on our
1120 : * XID as if this was a regularly inserted tuple all along. Or if
1121 : * we killed the tuple, they will see it's dead, and proceed as if
1122 : * the tuple never existed.
1123 : */
1124 4020 : SpeculativeInsertionLockRelease(GetCurrentTransactionId());
1125 :
1126 : /*
1127 : * If there was a conflict, start from the beginning. We'll do
1128 : * the pre-check again, which will now find the conflicting tuple
1129 : * (unless it aborts before we get there).
1130 : */
1131 4020 : if (specConflict)
1132 : {
1133 10 : list_free(recheckIndexes);
1134 10 : goto vlock;
1135 : }
1136 :
1137 : /* Since there was no insertion conflict, we're done */
1138 : }
1139 : else
1140 : {
1141 : /* insert the tuple normally */
1142 11196970 : table_tuple_insert(resultRelationDesc, slot,
1143 : estate->es_output_cid,
1144 : 0, NULL);
1145 :
1146 : /* insert index entries for tuple */
1147 11196946 : if (resultRelInfo->ri_NumIndices > 0)
1148 2806040 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1149 : slot, estate, false,
1150 : false, NULL, NIL,
1151 : false);
1152 : }
1153 : }
1154 :
1155 11202310 : if (canSetTag)
1156 11201138 : (estate->es_processed)++;
1157 :
1158 : /*
1159 : * If this insert is the result of a partition key update that moved the
1160 : * tuple to a new partition, put this row into the transition NEW TABLE,
1161 : * if there is one. We need to do this separately for DELETE and INSERT
1162 : * because they happen on different tables.
1163 : */
1164 11202310 : ar_insert_trig_tcs = mtstate->mt_transition_capture;
1165 11202310 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
1166 42 : && mtstate->mt_transition_capture->tcs_update_new_table)
1167 : {
1168 42 : ExecARUpdateTriggers(estate, resultRelInfo,
1169 : NULL, NULL,
1170 : NULL,
1171 : NULL,
1172 : slot,
1173 : NULL,
1174 42 : mtstate->mt_transition_capture,
1175 : false);
1176 :
1177 : /*
1178 : * We've already captured the NEW TABLE row, so make sure any AR
1179 : * INSERT trigger fired below doesn't capture it again.
1180 : */
1181 42 : ar_insert_trig_tcs = NULL;
1182 : }
1183 :
1184 : /* AFTER ROW INSERT Triggers */
1185 11202310 : ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
1186 : ar_insert_trig_tcs);
1187 :
1188 11202310 : list_free(recheckIndexes);
1189 :
1190 : /*
1191 : * Check any WITH CHECK OPTION constraints from parent views. We are
1192 : * required to do this after testing all constraints and uniqueness
1193 : * violations per the SQL spec, so we do it after actually inserting the
1194 : * record into the heap and all indexes.
1195 : *
1196 : * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
1197 : * tuple will never be seen, if it violates the WITH CHECK OPTION.
1198 : *
1199 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1200 : * are looking for at this point.
1201 : */
1202 11202310 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1203 364 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1204 :
1205 : /* Process RETURNING if present */
1206 11202164 : if (resultRelInfo->ri_projectReturning)
1207 3464 : result = ExecProcessReturning(resultRelInfo, slot, planSlot);
1208 :
1209 11202152 : if (inserted_tuple)
1210 712 : *inserted_tuple = slot;
1211 11202152 : if (insert_destrel)
1212 712 : *insert_destrel = resultRelInfo;
1213 :
1214 11202152 : return result;
1215 : }
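/*
 * A sketch of the ON CONFLICT flow above (hypothetical names):
 *
 *		INSERT INTO t (k, v) VALUES (1, 'x')
 *		ON CONFLICT (k) DO UPDATE SET v = EXCLUDED.v;
 *
 * If the arbiter-index pre-check finds a committed conflicting row, the
 * ExecOnConflictUpdate() branch runs the UPDATE part; otherwise the tuple
 * is inserted under a speculative token, and if a concurrent inserter wins
 * the race we loop back to "vlock" and redo the pre-check.
 */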
1216 :
1217 : /* ----------------------------------------------------------------
1218 : * ExecBatchInsert
1219 : *
1220 : * Insert multiple tuples in an efficient way.
1221 : * Currently, this handles inserting into a foreign table without
1222 : * RETURNING clause.
1223 : * ----------------------------------------------------------------
1224 : */
1225 : static void
1226 56 : ExecBatchInsert(ModifyTableState *mtstate,
1227 : ResultRelInfo *resultRelInfo,
1228 : TupleTableSlot **slots,
1229 : TupleTableSlot **planSlots,
1230 : int numSlots,
1231 : EState *estate,
1232 : bool canSetTag)
1233 : {
1234 : int i;
1235 56 : int numInserted = numSlots;
1236 56 : TupleTableSlot *slot = NULL;
1237 : TupleTableSlot **rslots;
1238 :
1239 : /*
1240 : * insert into foreign table: let the FDW do it
1241 : */
1242 56 : rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1243 : resultRelInfo,
1244 : slots,
1245 : planSlots,
1246 : &numInserted);
1247 :
1248 344 : for (i = 0; i < numInserted; i++)
1249 : {
1250 288 : slot = rslots[i];
1251 :
1252 : /*
1253 : * AFTER ROW Triggers might reference the tableoid column, so
1254 : * (re-)initialize tts_tableOid before evaluating them.
1255 : */
1256 288 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1257 :
1258 : /* AFTER ROW INSERT Triggers */
1259 288 : ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1260 288 : mtstate->mt_transition_capture);
1261 :
1262 : /*
1263 : * Check any WITH CHECK OPTION constraints from parent views. See the
1264 : * comment in ExecInsert.
1265 : */
1266 288 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1267 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1268 : }
1269 :
1270 56 : if (canSetTag && numInserted > 0)
1271 56 : estate->es_processed += numInserted;
1272 :
1273 : /* Clean up all the slots, ready for the next batch */
1274 344 : for (i = 0; i < numSlots; i++)
1275 : {
1276 288 : ExecClearTuple(slots[i]);
1277 288 : ExecClearTuple(planSlots[i]);
1278 : }
1279 56 : resultRelInfo->ri_NumSlots = 0;
1280 56 : }
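/*
 * With postgres_fdw, for example, this path is reached when batching is
 * enabled on the foreign table or server (hypothetical object name):
 *
 *		ALTER FOREIGN TABLE ft OPTIONS (ADD batch_size '100');
 *
 * ri_BatchSize itself comes from the FDW's GetForeignModifyBatchSize()
 * callback, so other FDWs may determine the batch size differently.
 */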
1281 :
1282 : /*
1283 : * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1284 : */
1285 : static void
1286 34 : ExecPendingInserts(EState *estate)
1287 : {
1288 : ListCell *l1,
1289 : *l2;
1290 :
1291 70 : forboth(l1, estate->es_insert_pending_result_relations,
1292 : l2, estate->es_insert_pending_modifytables)
1293 : {
1294 36 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1295 36 : ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1296 :
1297 : Assert(mtstate);
1298 36 : ExecBatchInsert(mtstate, resultRelInfo,
1299 : resultRelInfo->ri_Slots,
1300 : resultRelInfo->ri_PlanSlots,
1301 : resultRelInfo->ri_NumSlots,
1302 36 : estate, mtstate->canSetTag);
1303 : }
1304 :
1305 34 : list_free(estate->es_insert_pending_result_relations);
1306 34 : list_free(estate->es_insert_pending_modifytables);
1307 34 : estate->es_insert_pending_result_relations = NIL;
1308 34 : estate->es_insert_pending_modifytables = NIL;
1309 34 : }
1310 :
1311 : /*
1312 : * ExecDeletePrologue -- subroutine for ExecDelete
1313 : *
1314 : * Prepare executor state for DELETE. Actually, the only thing we have to do
1315 : * here is execute BEFORE ROW triggers. We return false if one of them makes
1316 : * the delete a no-op; otherwise, return true.
1317 : */
1318 : static bool
1319 1525946 : ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1320 : ItemPointer tupleid, HeapTuple oldtuple,
1321 : TupleTableSlot **epqreturnslot, TM_Result *result)
1322 : {
1323 1525946 : if (result)
1324 1322 : *result = TM_Ok;
1325 :
1326 : /* BEFORE ROW DELETE triggers */
1327 1525946 : if (resultRelInfo->ri_TrigDesc &&
1328 6880 : resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1329 : {
1330 : /* Flush any pending inserts, so rows are visible to the triggers */
1331 388 : if (context->estate->es_insert_pending_result_relations != NIL)
1332 2 : ExecPendingInserts(context->estate);
1333 :
1334 388 : return ExecBRDeleteTriggers(context->estate, context->epqstate,
1335 : resultRelInfo, tupleid, oldtuple,
1336 : epqreturnslot, result, &context->tmfd);
1337 : }
1338 :
1339 1525558 : return true;
1340 : }
1341 :
1342 : /*
1343 : * ExecDeleteAct -- subroutine for ExecDelete
1344 : *
1345 : * Actually delete the tuple from a plain table.
1346 : *
1347 : * Caller is in charge of doing EvalPlanQual as necessary
1348 : */
1349 : static TM_Result
1350 1525758 : ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1351 : ItemPointer tupleid, bool changingPart)
1352 : {
1353 1525758 : EState *estate = context->estate;
1354 :
1355 1525758 : return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1356 : estate->es_output_cid,
1357 : estate->es_snapshot,
1358 : estate->es_crosscheck_snapshot,
1359 : true /* wait for commit */ ,
1360 : &context->tmfd,
1361 : changingPart);
1362 : }
1363 :
1364 : /*
1365 : * ExecDeleteEpilogue -- subroutine for ExecDelete
1366 : *
1367 : * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1368 : * including the UPDATE triggers if the deletion is being done as part of a
1369 : * cross-partition tuple move.
1370 : */
1371 : static void
1372 1525698 : ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1373 : ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1374 : {
1375 1525698 : ModifyTableState *mtstate = context->mtstate;
1376 1525698 : EState *estate = context->estate;
1377 : TransitionCaptureState *ar_delete_trig_tcs;
1378 :
1379 : /*
1380 : * If this delete is the result of a partition key update that moved the
1381 : * tuple to a new partition, put this row into the transition OLD TABLE,
1382 : * if there is one. We need to do this separately for DELETE and INSERT
1383 : * because they happen on different tables.
1384 : */
1385 1525698 : ar_delete_trig_tcs = mtstate->mt_transition_capture;
1386 1525698 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1387 42 : mtstate->mt_transition_capture->tcs_update_old_table)
1388 : {
1389 42 : ExecARUpdateTriggers(estate, resultRelInfo,
1390 : NULL, NULL,
1391 : tupleid, oldtuple,
1392 42 : NULL, NULL, mtstate->mt_transition_capture,
1393 : false);
1394 :
1395 : /*
1396 : * We've already captured the OLD TABLE row, so make sure any AR
1397 : * DELETE trigger fired below doesn't capture it again.
1398 : */
1399 42 : ar_delete_trig_tcs = NULL;
1400 : }
1401 :
1402 : /* AFTER ROW DELETE Triggers */
1403 1525698 : ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1404 : ar_delete_trig_tcs, changingPart);
1405 1525698 : }
1406 :
1407 : /* ----------------------------------------------------------------
1408 : * ExecDelete
1409 : *
1410 : * DELETE is like UPDATE, except that we delete the tuple and no
1411 : * index modifications are needed.
1412 : *
1413 : * When deleting from a table, tupleid identifies the tuple to delete and
1414 : * oldtuple is NULL. When deleting through a view INSTEAD OF trigger,
1415 : * oldtuple is passed to the triggers and identifies what to delete, and
1416 : * tupleid is invalid. When deleting from a foreign table, tupleid is
1417 : * invalid; the FDW has to figure out which row to delete using data from
1418 : * the planSlot. oldtuple is passed to foreign table triggers; it is
1419 : * NULL when the foreign table has no relevant triggers. We use
1420 : * tupleDeleted to indicate whether the tuple is actually deleted;
1421 : * callers can use it to decide whether to continue the operation. When
1422 : * this DELETE is a part of an UPDATE of partition-key, then the slot
1423 : * returned by EvalPlanQual() is passed back using output parameter
1424 : * epqreturnslot.
1425 : *
1426 : * Returns RETURNING result if any, otherwise NULL.
1427 : * ----------------------------------------------------------------
1428 : */
1429 : static TupleTableSlot *
1430 1525570 : ExecDelete(ModifyTableContext *context,
1431 : ResultRelInfo *resultRelInfo,
1432 : ItemPointer tupleid,
1433 : HeapTuple oldtuple,
1434 : bool processReturning,
1435 : bool changingPart,
1436 : bool canSetTag,
1437 : TM_Result *tmresult,
1438 : bool *tupleDeleted,
1439 : TupleTableSlot **epqreturnslot)
1440 : {
1441 1525570 : EState *estate = context->estate;
1442 1525570 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1443 1525570 : TupleTableSlot *slot = NULL;
1444 : TM_Result result;
1445 :
1446 1525570 : if (tupleDeleted)
1447 946 : *tupleDeleted = false;
1448 :
1449 : /*
1450 : * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1451 : * done if it says we are.
1452 : */
1453 1525570 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1454 : epqreturnslot, tmresult))
1455 52 : return NULL;
1456 :
1457 : /* INSTEAD OF ROW DELETE Triggers */
1458 1525484 : if (resultRelInfo->ri_TrigDesc &&
1459 6738 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
1460 48 : {
1461 : bool dodelete;
1462 :
1463 : Assert(oldtuple != NULL);
1464 54 : dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1465 :
1466 54 : if (!dodelete) /* "do nothing" */
1467 6 : return NULL;
1468 : }
1469 1525430 : else if (resultRelInfo->ri_FdwRoutine)
1470 : {
1471 : /*
1472 : * delete from foreign table: let the FDW do it
1473 : *
1474 : * We offer the returning slot as a place to store RETURNING data,
1475 : * although the FDW can return some other slot if it wants.
1476 : */
1477 34 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1478 34 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1479 : resultRelInfo,
1480 : slot,
1481 : context->planSlot);
1482 :
1483 34 : if (slot == NULL) /* "do nothing" */
1484 0 : return NULL;
1485 :
1486 : /*
1487 : * RETURNING expressions might reference the tableoid column, so
1488 : * (re)initialize tts_tableOid before evaluating them.
1489 : */
1490 34 : if (TTS_EMPTY(slot))
1491 6 : ExecStoreAllNullTuple(slot);
1492 :
1493 34 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1494 : }
1495 : else
1496 : {
1497 : /*
1498 : * delete the tuple
1499 : *
1500 : * Note: if context->estate->es_crosscheck_snapshot isn't
1501 : * InvalidSnapshot, we check that the row to be deleted is visible to
1502 : * that snapshot, and throw a can't-serialize error if not. This is a
1503 : * special-case behavior needed for referential integrity updates in
1504 : * transaction-snapshot mode transactions.
1505 : */
1506 1525396 : ldelete:
1507 1525400 : result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1508 :
1509 1525364 : if (tmresult)
1510 912 : *tmresult = result;
1511 :
1512 1525364 : switch (result)
1513 : {
1514 30 : case TM_SelfModified:
1515 :
1516 : /*
1517 : * The target tuple was already updated or deleted by the
1518 : * current command, or by a later command in the current
1519 : * transaction. The former case is possible in a join DELETE
1520 : * where multiple tuples join to the same target tuple. This
1521 : * is somewhat questionable, but Postgres has always allowed
1522 : * it: we just ignore additional deletion attempts.
1523 : *
1524 : * The latter case arises if the tuple is modified by a
1525 : * command in a BEFORE trigger, or perhaps by a command in a
1526 : * volatile function used in the query. In such situations we
1527 : * should not ignore the deletion, but it is equally unsafe to
1528 : * proceed. We don't want to discard the original DELETE
1529 : * while keeping the triggered actions based on its deletion;
1530 : * and it would be no better to allow the original DELETE
1531 : * while discarding updates that it triggered. The row update
1532 : * carries some information that might be important according
1533 : * to business rules; so throwing an error is the only safe
1534 : * course.
1535 : *
1536 : * If a trigger actually intends this type of interaction, it
1537 : * can re-execute the DELETE and then return NULL to cancel
1538 : * the outer delete.
1539 : */
1540 30 : if (context->tmfd.cmax != estate->es_output_cid)
1541 6 : ereport(ERROR,
1542 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1543 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1544 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1545 :
1546 : /* Else, already deleted by self; nothing to do */
1547 24 : return NULL;
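 :
 : /*
 : * Hedged illustration of the two situations above (hypothetical
 : * table/trigger names, not part of this file):
 : *
 : * -- join DELETE: several source rows match one target row; the
 : * -- extra deletion attempts are silently ignored:
 : * DELETE FROM t USING s WHERE t.k = s.k;
 : *
 : * -- a BEFORE DELETE trigger that itself updates the target row
 : * -- trips the cmax check above and raises the error:
 : * CREATE TRIGGER t_bd BEFORE DELETE ON t
 : * FOR EACH ROW EXECUTE FUNCTION touch_same_row();
 : */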
1548 :
1549 1525270 : case TM_Ok:
1550 1525270 : break;
1551 :
1552 58 : case TM_Updated:
1553 : {
1554 : TupleTableSlot *inputslot;
1555 : TupleTableSlot *epqslot;
1556 :
1557 58 : if (IsolationUsesXactSnapshot())
1558 2 : ereport(ERROR,
1559 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1560 : errmsg("could not serialize access due to concurrent update")));
1561 :
1562 : /*
1563 : * Already know that we're going to need to do EPQ, so
1564 : * fetch tuple directly into the right slot.
1565 : */
1566 56 : EvalPlanQualBegin(context->epqstate);
1567 56 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1568 : resultRelInfo->ri_RangeTableIndex);
1569 :
1570 56 : result = table_tuple_lock(resultRelationDesc, tupleid,
1571 : estate->es_snapshot,
1572 : inputslot, estate->es_output_cid,
1573 : LockTupleExclusive, LockWaitBlock,
1574 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1575 : &context->tmfd);
1576 :
1577 52 : switch (result)
1578 : {
1579 46 : case TM_Ok:
1580 : Assert(context->tmfd.traversed);
1581 46 : epqslot = EvalPlanQual(context->epqstate,
1582 : resultRelationDesc,
1583 : resultRelInfo->ri_RangeTableIndex,
1584 : inputslot);
1585 46 : if (TupIsNull(epqslot))
1586 : /* Tuple not passing quals anymore, exiting... */
1587 30 : return NULL;
1588 :
1589 : /*
1590 : * If requested, skip delete and pass back the
1591 : * updated row.
1592 : */
1593 16 : if (epqreturnslot)
1594 : {
1595 12 : *epqreturnslot = epqslot;
1596 12 : return NULL;
1597 : }
1598 : else
1599 4 : goto ldelete;
1600 :
1601 4 : case TM_SelfModified:
1602 :
1603 : /*
1604 : * This can be reached when following an update
1605 : * chain from a tuple updated by another session,
1606 : * reaching a tuple that was already updated in
1607 : * this transaction. If previously updated by this
1608 : * command, ignore the delete, otherwise error
1609 : * out.
1610 : *
1611 : * See also TM_SelfModified response to
1612 : * table_tuple_delete() above.
1613 : */
1614 4 : if (context->tmfd.cmax != estate->es_output_cid)
1615 2 : ereport(ERROR,
1616 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1617 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1618 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1619 2 : return NULL;
1620 :
1621 2 : case TM_Deleted:
1622 : /* tuple already deleted; nothing to do */
1623 2 : return NULL;
1624 :
1625 0 : default:
1626 :
1627 : /*
1628 : * TM_Invisible should be impossible because we're
1629 : * waiting for updated row versions, and would
1630 : * already have errored out if the first version
1631 : * is invisible.
1632 : *
1633 : * TM_Updated should be impossible, because we're
1634 : * locking the latest version via
1635 : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
1636 : */
1637 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
1638 : result);
1639 : return NULL;
1640 : }
1641 :
1642 : Assert(false);
1643 : break;
1644 : }
1645 :
1646 6 : case TM_Deleted:
1647 6 : if (IsolationUsesXactSnapshot())
1648 0 : ereport(ERROR,
1649 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1650 : errmsg("could not serialize access due to concurrent delete")));
1651 : /* tuple already deleted; nothing to do */
1652 6 : return NULL;
1653 :
1654 0 : default:
1655 0 : elog(ERROR, "unrecognized table_tuple_delete status: %u",
1656 : result);
1657 : return NULL;
1658 : }
1659 :
1660 : /*
1661 : * Note: Normally one would think that we have to delete index tuples
1662 : * associated with the heap tuple now...
1663 : *
1664 : * ... but in POSTGRES, we have no need to do this because VACUUM will
1665 : * take care of it later. We can't delete index tuples immediately
1666 : * anyway, since the tuple is still visible to other transactions.
1667 : */
1668 : }
1669 :
1670 1525352 : if (canSetTag)
1671 1524256 : (estate->es_processed)++;
1672 :
1673 : /* Tell caller that the delete actually happened. */
1674 1525352 : if (tupleDeleted)
1675 868 : *tupleDeleted = true;
1676 :
1677 1525352 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
1678 :
1679 : /* Process RETURNING if present and if requested */
1680 1525352 : if (processReturning && resultRelInfo->ri_projectReturning)
1681 : {
1682 : /*
1683 : * We have to put the target tuple into a slot, which means we
1684 : * first have to fetch it. We can use the trigger tuple slot.
1685 : */
1686 : TupleTableSlot *rslot;
1687 :
1688 874 : if (resultRelInfo->ri_FdwRoutine)
1689 : {
1690 : /* FDW must have provided a slot containing the deleted row */
1691 : Assert(!TupIsNull(slot));
1692 : }
1693 : else
1694 : {
1695 868 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1696 868 : if (oldtuple != NULL)
1697 : {
1698 24 : ExecForceStoreHeapTuple(oldtuple, slot, false);
1699 : }
1700 : else
1701 : {
1702 844 : if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
1703 : SnapshotAny, slot))
1704 0 : elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1705 : }
1706 : }
1707 :
1708 874 : rslot = ExecProcessReturning(resultRelInfo, slot, context->planSlot);
1709 :
1710 : /*
1711 : * Before releasing the target tuple again, make sure rslot has a
1712 : * local copy of any pass-by-reference values.
1713 : */
1714 874 : ExecMaterializeSlot(rslot);
1715 :
1716 874 : ExecClearTuple(slot);
1717 :
1718 874 : return rslot;
1719 : }
1720 :
1721 1524478 : return NULL;
1722 : }
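 :
 : /*
 : * For reference, the RETURNING path above serves a statement such as
 : * (hypothetical table):
 : *
 : * DELETE FROM orders WHERE id = 42 RETURNING *;
 : *
 : * The deleted row is re-fetched with SnapshotAny (it is no longer
 : * visible to the query snapshot) and projected through
 : * ri_projectReturning before being handed back.
 : */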
1723 :
1724 : /*
1725 : * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
1726 : *
1727 : * This works by first deleting the old tuple from the current partition,
1728 : * followed by inserting the new tuple into the root parent table, that is,
1729 : * mtstate->rootResultRelInfo. It will be re-routed from there to the
1730 : * correct partition.
1731 : *
1732 : * Returns true if the tuple has been successfully moved, or if it's found
1733 : * that the tuple was concurrently deleted so there's nothing more to do
1734 : * for the caller.
1735 : *
1736 : * False is returned if the tuple we're trying to move is found to have been
1737 : * concurrently updated. In that case, the caller must check if the updated
1738 : * tuple that's returned in *retry_slot still needs to be re-routed, and call
1739 : * this function again or perform a regular update accordingly. For MERGE,
1740 : * the updated tuple is not returned in *retry_slot; it has its own retry
1741 : * logic.
1742 : */
1743 : static bool
1744 994 : ExecCrossPartitionUpdate(ModifyTableContext *context,
1745 : ResultRelInfo *resultRelInfo,
1746 : ItemPointer tupleid, HeapTuple oldtuple,
1747 : TupleTableSlot *slot,
1748 : bool canSetTag,
1749 : UpdateContext *updateCxt,
1750 : TM_Result *tmresult,
1751 : TupleTableSlot **retry_slot,
1752 : TupleTableSlot **inserted_tuple,
1753 : ResultRelInfo **insert_destrel)
1754 : {
1755 994 : ModifyTableState *mtstate = context->mtstate;
1756 994 : EState *estate = mtstate->ps.state;
1757 : TupleConversionMap *tupconv_map;
1758 : bool tuple_deleted;
1759 994 : TupleTableSlot *epqslot = NULL;
1760 :
1761 994 : context->cpUpdateReturningSlot = NULL;
1762 994 : *retry_slot = NULL;
1763 :
1764 : /*
1765 : * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
1766 : * to migrate to a different partition. Maybe this can be implemented
1767 : * some day, but it seems a fringe feature with little redeeming value.
1768 : */
1769 994 : if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
1770 0 : ereport(ERROR,
1771 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1772 : errmsg("invalid ON UPDATE specification"),
1773 : errdetail("The result tuple would appear in a different partition than the original tuple.")));
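 :
 : /*
 : * Illustrative (hypothetical) statement rejected by the check above:
 : * an INSERT ... ON CONFLICT DO UPDATE whose SET clause changes the
 : * partitioning column of the existing conflicting row, e.g.:
 : *
 : * INSERT INTO m VALUES (1, 2)
 : * ON CONFLICT (k) DO UPDATE SET p = EXCLUDED.p;
 : *
 : * If the new value of "p" would route the row to a different
 : * partition, the error above is raised instead of moving the row.
 : */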
1774 :
1775 : /*
1776 : * When an UPDATE is run directly on a leaf partition, simply fail with a
1777 : * partition constraint violation error.
1778 : */
1779 994 : if (resultRelInfo == mtstate->rootResultRelInfo)
1780 48 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1781 :
1782 : /* Initialize tuple routing info if not already done. */
1783 946 : if (mtstate->mt_partition_tuple_routing == NULL)
1784 : {
1785 590 : Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
1786 : MemoryContext oldcxt;
1787 :
1788 : /* Things built here have to last for the query duration. */
1789 590 : oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1790 :
1791 590 : mtstate->mt_partition_tuple_routing =
1792 590 : ExecSetupPartitionTupleRouting(estate, rootRel);
1793 :
1794 : /*
1795 : * Before a partition's tuple can be re-routed, it must first be
1796 : * converted to the root's format, so we'll need a slot for storing
1797 : * such tuples.
1798 : */
1799 : Assert(mtstate->mt_root_tuple_slot == NULL);
1800 590 : mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
1801 :
1802 590 : MemoryContextSwitchTo(oldcxt);
1803 : }
1804 :
1805 : /*
1806 : * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
1807 : * We want to return rows from INSERT.
1808 : */
1809 946 : ExecDelete(context, resultRelInfo,
1810 : tupleid, oldtuple,
1811 : false, /* processReturning */
1812 : true, /* changingPart */
1813 : false, /* canSetTag */
1814 : tmresult, &tuple_deleted, &epqslot);
1815 :
1816 : /*
1817 : * If the DELETE didn't happen for some reason (e.g. a trigger
1818 : * prevented it, it was already deleted by self, or it was concurrently
1819 : * deleted by another transaction), then we should skip the insert as well;
1820 : * otherwise, an UPDATE could cause an increase in the total number of
1821 : * rows across all partitions, which is clearly wrong.
1822 : *
1823 : * For a normal UPDATE, the case where the tuple has been the subject of a
1824 : * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
1825 : * machinery, but for an UPDATE that we've translated into a DELETE from
1826 : * this partition and an INSERT into some other partition, that's not
1827 : * available, because CTID chains can't span relation boundaries. We
1828 : * mimic the semantics to a limited extent by skipping the INSERT if the
1829 : * DELETE fails to find a tuple. This ensures that two concurrent
1830 : * attempts to UPDATE the same tuple at the same time can't turn one tuple
1831 : * into two, and that an UPDATE of a just-deleted tuple can't resurrect
1832 : * it.
1833 : */
1834 944 : if (!tuple_deleted)
1835 : {
1836 : /*
1837 : * epqslot will be typically NULL. But when ExecDelete() finds that
1838 : * another transaction has concurrently updated the same row, it
1839 : * re-fetches the row, skips the delete, and epqslot is set to the
1840 : * re-fetched tuple slot. In that case, we need to do all the checks
1841 : * again. For MERGE, we leave everything to the caller (it must do
1842 : * additional rechecking, and might end up executing a different
1843 : * action entirely).
1844 : */
1845 76 : if (mtstate->operation == CMD_MERGE)
1846 34 : return *tmresult == TM_Ok;
1847 42 : else if (TupIsNull(epqslot))
1848 36 : return true;
1849 : else
1850 : {
1851 : /* Fetch the most recent version of old tuple. */
1852 : TupleTableSlot *oldSlot;
1853 :
1854 : /* ... but first, make sure ri_oldTupleSlot is initialized. */
1855 6 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
1856 0 : ExecInitUpdateProjection(mtstate, resultRelInfo);
1857 6 : oldSlot = resultRelInfo->ri_oldTupleSlot;
1858 6 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
1859 : tupleid,
1860 : SnapshotAny,
1861 : oldSlot))
1862 0 : elog(ERROR, "failed to fetch tuple being updated");
1863 : /* and project the new tuple to retry the UPDATE with */
1864 6 : *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
1865 : oldSlot);
1866 6 : return false;
1867 : }
1868 : }
1869 :
1870 : /*
1871 : * resultRelInfo is one of the per-relation resultRelInfos. So we should
1872 : * convert the tuple into root's tuple descriptor if needed, since
1873 : * ExecInsert() starts the search from root.
1874 : */
1875 868 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1876 868 : if (tupconv_map != NULL)
1877 302 : slot = execute_attr_map_slot(tupconv_map->attrMap,
1878 : slot,
1879 : mtstate->mt_root_tuple_slot);
1880 :
1881 : /* Tuple routing starts from the root table. */
1882 740 : context->cpUpdateReturningSlot =
1883 868 : ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
1884 : inserted_tuple, insert_destrel);
1885 :
1886 : /*
1887 : * Reset the transition state that may possibly have been written by
1888 : * INSERT.
1889 : */
1890 740 : if (mtstate->mt_transition_capture)
1891 42 : mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
1892 :
1893 : /* We're done moving. */
1894 740 : return true;
1895 : }
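 :
 : /*
 : * Minimal sketch of a statement exercising this function (hypothetical
 : * schema):
 : *
 : * CREATE TABLE m (p int, v text) PARTITION BY LIST (p);
 : * CREATE TABLE m1 PARTITION OF m FOR VALUES IN (1);
 : * CREATE TABLE m2 PARTITION OF m FOR VALUES IN (2);
 : * INSERT INTO m VALUES (1, 'x');
 : * UPDATE m SET p = 2 WHERE p = 1; -- row moves from m1 to m2
 : *
 : * The move is executed as a DELETE from m1 followed by an INSERT
 : * routed from the root, as described above.
 : */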
1896 :
1897 : /*
1898 : * ExecUpdatePrologue -- subroutine for ExecUpdate
1899 : *
1900 : * Prepare executor state for UPDATE. This includes running BEFORE ROW
1901 : * triggers. We return false if one of them makes the update a no-op;
1902 : * otherwise, return true.
1903 : */
1904 : static bool
1905 313946 : ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1906 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
1907 : TM_Result *result)
1908 : {
1909 313946 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1910 :
1911 313946 : if (result)
1912 2122 : *result = TM_Ok;
1913 :
1914 313946 : ExecMaterializeSlot(slot);
1915 :
1916 : /*
1917 : * Open the table's indexes, if we have not done so already, so that we
1918 : * can add new index entries for the updated tuple.
1919 : */
1920 313946 : if (resultRelationDesc->rd_rel->relhasindex &&
1921 223436 : resultRelInfo->ri_IndexRelationDescs == NULL)
1922 7478 : ExecOpenIndices(resultRelInfo, false);
1923 :
1924 : /* BEFORE ROW UPDATE triggers */
1925 313946 : if (resultRelInfo->ri_TrigDesc &&
1926 5920 : resultRelInfo->ri_TrigDesc->trig_update_before_row)
1927 : {
1928 : /* Flush any pending inserts, so rows are visible to the triggers */
1929 2572 : if (context->estate->es_insert_pending_result_relations != NIL)
1930 2 : ExecPendingInserts(context->estate);
1931 :
1932 2572 : return ExecBRUpdateTriggers(context->estate, context->epqstate,
1933 : resultRelInfo, tupleid, oldtuple, slot,
1934 : result, &context->tmfd);
1935 : }
1936 :
1937 311374 : return true;
1938 : }
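 :
 : /*
 : * A BEFORE ROW UPDATE trigger can make the update a no-op by returning
 : * NULL, in which case this function returns false. Hypothetical
 : * example:
 : *
 : * CREATE FUNCTION skip_upd() RETURNS trigger LANGUAGE plpgsql
 : * AS $$ BEGIN RETURN NULL; END $$;
 : * CREATE TRIGGER t_bu BEFORE UPDATE ON t
 : * FOR EACH ROW EXECUTE FUNCTION skip_upd();
 : */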
1939 :
1940 : /*
1941 : * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
1942 : *
1943 : * Apply the final modifications to the tuple slot before the update.
1944 : * (This is split out because we also need it in the foreign-table code path.)
1945 : */
1946 : static void
1947 313668 : ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
1948 : TupleTableSlot *slot,
1949 : EState *estate)
1950 : {
1951 313668 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1952 :
1953 : /*
1954 : * Constraints and GENERATED expressions might reference the tableoid
1955 : * column, so (re-)initialize tts_tableOid before evaluating them.
1956 : */
1957 313668 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1958 :
1959 : /*
1960 : * Compute stored generated columns
1961 : */
1962 313668 : if (resultRelationDesc->rd_att->constr &&
1963 186586 : resultRelationDesc->rd_att->constr->has_generated_stored)
1964 260 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
1965 : CMD_UPDATE);
1966 313668 : }
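 :
 : /*
 : * The stored-GENERATED recomputation above covers cases such as
 : * (hypothetical table):
 : *
 : * CREATE TABLE item (w int, h int,
 : * area int GENERATED ALWAYS AS (w * h) STORED);
 : * UPDATE item SET w = 3; -- "area" is recomputed before the update
 : */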
1967 :
1968 : /*
1969 : * ExecUpdateAct -- subroutine for ExecUpdate
1970 : *
1971 : * Actually update the tuple, when operating on a plain table. If the
1972 : * table is a partition, and the command was called referencing an ancestor
1973 : * partitioned table, this routine migrates the resulting tuple to another
1974 : * partition.
1975 : *
1976 : * The caller is in charge of keeping indexes current as necessary. The
1977 : * caller is also in charge of doing EvalPlanQual if the tuple is found to
1978 : * be concurrently updated. However, in case of a cross-partition update,
1979 : * this routine does it.
1980 : */
1981 : static TM_Result
1982 313516 : ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1983 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
1984 : bool canSetTag, UpdateContext *updateCxt)
1985 : {
1986 313516 : EState *estate = context->estate;
1987 313516 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1988 : bool partition_constraint_failed;
1989 : TM_Result result;
1990 :
1991 313516 : updateCxt->crossPartUpdate = false;
1992 :
1993 : /*
1994 : * If we move the tuple to a new partition, we loop back here to recompute
1995 : * GENERATED values (which are allowed to be different across partitions)
1996 : * and recheck any RLS policies and constraints. We do not fire any
1997 : * BEFORE triggers of the new partition, however.
1998 : */
1999 313522 : lreplace:
2000 : /* Fill in GENERATED columns */
2001 313522 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2002 :
2003 : /* ensure slot is independent, consider e.g. EPQ */
2004 313522 : ExecMaterializeSlot(slot);
2005 :
2006 : /*
2007 : * If the partition constraint fails, this row might get moved to another
2008 : * partition, in which case we should check the RLS CHECK policy just
2009 : * before inserting into the new partition, rather than doing it here.
2010 : * This is because a trigger on that partition might again change the row.
2011 : * So skip the WCO checks if the partition constraint fails.
2012 : */
2013 313522 : partition_constraint_failed =
2014 316056 : resultRelationDesc->rd_rel->relispartition &&
2015 2534 : !ExecPartitionCheck(resultRelInfo, slot, estate, false);
2016 :
2017 : /* Check any RLS UPDATE WITH CHECK policies */
2018 313522 : if (!partition_constraint_failed &&
2019 312528 : resultRelInfo->ri_WithCheckOptions != NIL)
2020 : {
2021 : /*
2022 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
2023 : * we are looking for at this point.
2024 : */
2025 480 : ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
2026 : resultRelInfo, slot, estate);
2027 : }
2028 :
2029 : /*
2030 : * If a partition check failed, try to move the row into the right
2031 : * partition.
2032 : */
2033 313468 : if (partition_constraint_failed)
2034 : {
2035 : TupleTableSlot *inserted_tuple,
2036 : *retry_slot;
2037 994 : ResultRelInfo *insert_destrel = NULL;
2038 :
2039 : /*
2040 : * ExecCrossPartitionUpdate will first DELETE the row from the
2041 : * partition it's currently in and then insert it back into the root
2042 : * table, which will re-route it to the correct partition. However,
2043 : * if the tuple has been concurrently updated, a retry is needed.
2044 : */
2045 994 : if (ExecCrossPartitionUpdate(context, resultRelInfo,
2046 : tupleid, oldtuple, slot,
2047 : canSetTag, updateCxt,
2048 : &result,
2049 : &retry_slot,
2050 : &inserted_tuple,
2051 : &insert_destrel))
2052 : {
2053 : /* success! */
2054 800 : updateCxt->crossPartUpdate = true;
2055 :
2056 : /*
2057 : * If the partitioned table being updated is referenced in foreign
2058 : * keys, queue up trigger events to check that none of them were
2059 : * violated. No special treatment is needed in
2060 : * non-cross-partition update situations, because the leaf
2061 : * partition's AR update triggers will take care of that. During
2062 : * cross-partition updates implemented as delete on the source
2063 : * partition followed by insert on the destination partition,
2064 : * AR-UPDATE triggers of the root table (that is, the table
2065 : * mentioned in the query) must be fired.
2066 : *
2067 : * NULL insert_destrel means that the move failed to occur, that
2068 : * is, the update failed, so there is no need to do anything in that case.
2069 : */
2070 800 : if (insert_destrel &&
2071 712 : resultRelInfo->ri_TrigDesc &&
2072 314 : resultRelInfo->ri_TrigDesc->trig_update_after_row)
2073 252 : ExecCrossPartitionUpdateForeignKey(context,
2074 : resultRelInfo,
2075 : insert_destrel,
2076 : tupleid, slot,
2077 : inserted_tuple);
2078 :
2079 804 : return TM_Ok;
2080 : }
2081 :
2082 : /*
2083 : * No luck, a retry is needed. If running MERGE, we do not retry
2084 : * here; instead we let MERGE handle it under its own retry rules.
2085 : */
2086 16 : if (context->mtstate->operation == CMD_MERGE)
2087 10 : return result;
2088 :
2089 : /*
2090 : * ExecCrossPartitionUpdate installed an updated version of the new
2091 : * tuple in the retry slot; start over.
2092 : */
2093 6 : slot = retry_slot;
2094 6 : goto lreplace;
2095 : }
2096 :
2097 : /*
2098 : * Check the constraints of the tuple. We've already checked the
2099 : * partition constraint above; however, we must still ensure the tuple
2100 : * passes all other constraints, so we will call ExecConstraints() and
2101 : * have it validate all remaining checks.
2102 : */
2103 312474 : if (resultRelationDesc->rd_att->constr)
2104 186056 : ExecConstraints(resultRelInfo, slot, estate);
2105 :
2106 : /*
2107 : * replace the heap tuple
2108 : *
2109 : * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
2110 : * the row to be updated is visible to that snapshot, and throw a
2111 : * can't-serialize error if not. This is a special-case behavior needed
2112 : * for referential integrity updates in transaction-snapshot mode
2113 : * transactions.
2114 : */
2115 312424 : result = table_tuple_update(resultRelationDesc, tupleid, slot,
2116 : estate->es_output_cid,
2117 : estate->es_snapshot,
2118 : estate->es_crosscheck_snapshot,
2119 : true /* wait for commit */ ,
2120 : &context->tmfd, &updateCxt->lockmode,
2121 : &updateCxt->updateIndexes);
2122 :
2123 312400 : return result;
2124 : }
2125 :
2126 : /*
2127 : * ExecUpdateEpilogue -- subroutine for ExecUpdate
2128 : *
2129 : * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2130 : * returns indicating that the tuple was updated.
2131 : */
2132 : static void
2133 312406 : ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
2134 : ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2135 : HeapTuple oldtuple, TupleTableSlot *slot)
2136 : {
2137 312406 : ModifyTableState *mtstate = context->mtstate;
2138 312406 : List *recheckIndexes = NIL;
2139 :
2140 : /* insert index entries for tuple if necessary */
2141 312406 : if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
2142 169328 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
2143 : slot, context->estate,
2144 : true, false,
2145 : NULL, NIL,
2146 169328 : (updateCxt->updateIndexes == TU_Summarizing));
2147 :
2148 : /* AFTER ROW UPDATE Triggers */
2149 312388 : ExecARUpdateTriggers(context->estate, resultRelInfo,
2150 : NULL, NULL,
2151 : tupleid, oldtuple, slot,
2152 : recheckIndexes,
2153 312388 : mtstate->operation == CMD_INSERT ?
2154 : mtstate->mt_oc_transition_capture :
2155 : mtstate->mt_transition_capture,
2156 : false);
2157 :
2158 312388 : list_free(recheckIndexes);
2159 :
2160 : /*
2161 : * Check any WITH CHECK OPTION constraints from parent views. We are
2162 : * required to do this after testing all constraints and uniqueness
2163 : * violations per the SQL spec, so we do it after actually updating the
2164 : * record in the heap and all indexes.
2165 : *
2166 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2167 : * are looking for at this point.
2168 : */
2169 312388 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2170 454 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2171 : slot, context->estate);
2172 312312 : }
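 :
 : /*
 : * The WCO_VIEW_CHECK step above is what enforces, e.g. (hypothetical):
 : *
 : * CREATE VIEW pos_v AS SELECT * FROM t WHERE x > 0
 : * WITH CHECK OPTION;
 : * UPDATE pos_v SET x = -1 WHERE id = 1; -- fails the CHECK OPTION
 : */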
2173 :
2174 : /*
2175 : * Queues up an update event using the target root partitioned table's
2176 : * trigger to check that a cross-partition update hasn't broken any foreign
2177 : * keys pointing into it.
2178 : */
2179 : static void
2180 252 : ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
2181 : ResultRelInfo *sourcePartInfo,
2182 : ResultRelInfo *destPartInfo,
2183 : ItemPointer tupleid,
2184 : TupleTableSlot *oldslot,
2185 : TupleTableSlot *newslot)
2186 : {
2187 : ListCell *lc;
2188 : ResultRelInfo *rootRelInfo;
2189 : List *ancestorRels;
2190 :
2191 252 : rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2192 252 : ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2193 :
2194 : /*
2195 : * For any foreign keys that point directly into a non-root ancestor of
2196 : * the source partition, we could in theory fire an update event to enforce
2197 : * those constraints using their triggers, if we could tell that both the
2198 : * source and the destination partitions are under the same ancestor. But
2199 : * for now, we simply report an error that those cannot be enforced.
2200 : */
2201 558 : foreach(lc, ancestorRels)
2202 : {
2203 312 : ResultRelInfo *rInfo = lfirst(lc);
2204 312 : TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2205 312 : bool has_noncloned_fkey = false;
2206 :
2207 : /* Root ancestor's triggers will be processed. */
2208 312 : if (rInfo == rootRelInfo)
2209 246 : continue;
2210 :
2211 66 : if (trigdesc && trigdesc->trig_update_after_row)
2212 : {
2213 228 : for (int i = 0; i < trigdesc->numtriggers; i++)
2214 : {
2215 168 : Trigger *trig = &trigdesc->triggers[i];
2216 :
2217 174 : if (!trig->tgisclone &&
2218 6 : RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
2219 : {
2220 6 : has_noncloned_fkey = true;
2221 6 : break;
2222 : }
2223 : }
2224 : }
2225 :
2226 66 : if (has_noncloned_fkey)
2227 6 : ereport(ERROR,
2228 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2229 : errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2230 : errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2231 : RelationGetRelationName(rInfo->ri_RelationDesc),
2232 : RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
2233 : errhint("Consider defining the foreign key on table \"%s\".",
2234 : RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
2235 : }
2236 :
2237 : /* Perform the root table's triggers. */
2238 246 : ExecARUpdateTriggers(context->estate,
2239 : rootRelInfo, sourcePartInfo, destPartInfo,
2240 : tupleid, NULL, newslot, NIL, NULL, true);
2241 246 : }
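 :
 : /*
 : * Minimal sketch (hypothetical schema) of the rejected case above: a
 : * foreign key referencing a non-root ancestor:
 : *
 : * CREATE TABLE root (a int, b int, PRIMARY KEY (a, b))
 : * PARTITION BY RANGE (a);
 : * CREATE TABLE mid PARTITION OF root
 : * FOR VALUES FROM (0) TO (100) PARTITION BY RANGE (b);
 : * CREATE TABLE leaf PARTITION OF mid FOR VALUES FROM (0) TO (100);
 : * CREATE TABLE mid2 PARTITION OF root
 : * FOR VALUES FROM (100) TO (200);
 : * CREATE TABLE ref (a int, b int, FOREIGN KEY (a, b) REFERENCES mid);
 : *
 : * INSERT INTO root VALUES (1, 1);
 : * UPDATE root SET a = 150 WHERE a = 1; -- moves the row out from under
 : * -- "mid"; rejected with the hint to declare the FK on "root"
 : */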
2242 :
2243 : /* ----------------------------------------------------------------
2244 : * ExecUpdate
2245 : *
2246 : * note: we can't run UPDATE queries with transactions
2247 : * off because UPDATEs are actually INSERTs and our
2248 : * scan will mistakenly loop forever, updating the tuple
2249 : * it just inserted. This should be fixed but until it
2250 : * is, we don't want to get stuck in an infinite loop
2251 : * which corrupts your database.
2252 : *
2253 : * When updating a table, tupleid identifies the tuple to update and
2254 : * oldtuple is NULL. When updating through a view INSTEAD OF trigger,
2255 : * oldtuple is passed to the triggers and identifies what to update, and
2256 : * tupleid is invalid. When updating a foreign table, tupleid is
2257 : * invalid; the FDW has to figure out which row to update using data from
2258 : * the planSlot. oldtuple is passed to foreign table triggers; it is
2259 : * NULL when the foreign table has no relevant triggers.
2260 : *
2261 : * slot contains the new tuple value to be stored.
2262 : * planSlot is the output of the ModifyTable's subplan; we use it
2263 : * to access values from other input tables (for RETURNING),
2264 : * row-ID junk columns, etc.
2265 : *
2266 : * Returns RETURNING result if any, otherwise NULL. On exit, if tupleid
2267 : * had identified the tuple to update, it will identify the tuple
2268 : * actually updated after EvalPlanQual.
2269 : * ----------------------------------------------------------------
2270 : */
2271 : static TupleTableSlot *
2272 311824 : ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2273 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2274 : bool canSetTag)
2275 : {
2276 311824 : EState *estate = context->estate;
2277 311824 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2278 311824 : UpdateContext updateCxt = {0};
2279 : TM_Result result;
2280 :
2281 : /*
2282 : * abort the operation if not running transactions
2283 : */
2284 311824 : if (IsBootstrapProcessingMode())
2285 0 : elog(ERROR, "cannot UPDATE during bootstrap");
2286 :
2287 : /*
2288 : * Prepare for the update. This includes BEFORE ROW triggers, so we're
2289 : * done if it says we are.
2290 : */
2291 311824 : if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
2292 138 : return NULL;
2293 :
2294 : /* INSTEAD OF ROW UPDATE Triggers */
2295 311650 : if (resultRelInfo->ri_TrigDesc &&
2296 5394 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2297 : {
2298 114 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2299 : oldtuple, slot))
2300 18 : return NULL; /* "do nothing" */
2301 : }
2302 311536 : else if (resultRelInfo->ri_FdwRoutine)
2303 : {
2304 : /* Fill in GENERATED columns */
2305 146 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2306 :
2307 : /*
2308 : * update in foreign table: let the FDW do it
2309 : */
2310 146 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2311 : resultRelInfo,
2312 : slot,
2313 : context->planSlot);
2314 :
2315 146 : if (slot == NULL) /* "do nothing" */
2316 2 : return NULL;
2317 :
2318 : /*
2319 : * AFTER ROW Triggers or RETURNING expressions might reference the
2320 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
2321 : * them. (This covers the case where the FDW replaced the slot.)
2322 : */
2323 144 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2324 : }
2325 : else
2326 : {
2327 : /*
2328 : * If we generate a new candidate tuple after EvalPlanQual testing, we
2329 : * must loop back here to try again. (We don't need to redo triggers,
2330 : * however. If there are any BEFORE triggers then trigger.c will have
2331 : * done table_tuple_lock to lock the correct tuple, so there's no need
2332 : * to do them again.)
2333 : */
2334 311390 : redo_act:
2335 311490 : result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2336 : canSetTag, &updateCxt);
2337 :
2338 : /*
2339 : * If ExecUpdateAct reports that a cross-partition update was done,
2340 : * then the RETURNING tuple (if any) has been projected and there's
2341 : * nothing else for us to do.
2342 : */
2343 311198 : if (updateCxt.crossPartUpdate)
2344 660 : return context->cpUpdateReturningSlot;
2345 :
2346 310538 : switch (result)
2347 : {
2348 84 : case TM_SelfModified:
2349 :
2350 : /*
2351 : * The target tuple was already updated or deleted by the
2352 : * current command, or by a later command in the current
2353 : * transaction. The former case is possible in a join UPDATE
2354 : * where multiple tuples join to the same target tuple. This
2355 : * is pretty questionable, but Postgres has always allowed it:
2356 : * we just execute the first update action and ignore
2357 : * additional update attempts.
2358 : *
2359 : * The latter case arises if the tuple is modified by a
2360 : * command in a BEFORE trigger, or perhaps by a command in a
2361 : * volatile function used in the query. In such situations we
2362 : * should not ignore the update, but it is equally unsafe to
2363 : * proceed. We don't want to discard the original UPDATE
2364 : * while keeping the triggered actions based on it; and we
2365 : * have no principled way to merge this update with the
2366 : * previous ones. So throwing an error is the only safe
2367 : * course.
2368 : *
2369 : * If a trigger actually intends this type of interaction, it
2370 : * can re-execute the UPDATE (assuming it can figure out how)
2371 : * and then return NULL to cancel the outer update.
2372 : */
2373 84 : if (context->tmfd.cmax != estate->es_output_cid)
2374 6 : ereport(ERROR,
2375 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2376 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2377 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2378 :
2379 : /* Else, already updated by self; nothing to do */
2380 78 : return NULL;
2381 :
2382 310294 : case TM_Ok:
2383 310294 : break;
2384 :
2385 152 : case TM_Updated:
2386 : {
2387 : TupleTableSlot *inputslot;
2388 : TupleTableSlot *epqslot;
2389 : TupleTableSlot *oldSlot;
2390 :
2391 152 : if (IsolationUsesXactSnapshot())
2392 4 : ereport(ERROR,
2393 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2394 : errmsg("could not serialize access due to concurrent update")));
2395 :
2396 : /*
2397 : * Already know that we're going to need to do EPQ, so
2398 : * fetch tuple directly into the right slot.
2399 : */
2400 148 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2401 : resultRelInfo->ri_RangeTableIndex);
2402 :
2403 148 : result = table_tuple_lock(resultRelationDesc, tupleid,
2404 : estate->es_snapshot,
2405 : inputslot, estate->es_output_cid,
2406 : updateCxt.lockmode, LockWaitBlock,
2407 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2408 : &context->tmfd);
2409 :
2410 144 : switch (result)
2411 : {
2412 134 : case TM_Ok:
2413 : Assert(context->tmfd.traversed);
2414 :
2415 134 : epqslot = EvalPlanQual(context->epqstate,
2416 : resultRelationDesc,
2417 : resultRelInfo->ri_RangeTableIndex,
2418 : inputslot);
2419 134 : if (TupIsNull(epqslot))
2420 : /* Tuple not passing quals anymore, exiting... */
2421 34 : return NULL;
2422 :
2423 : /* Make sure ri_oldTupleSlot is initialized. */
2424 100 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2425 0 : ExecInitUpdateProjection(context->mtstate,
2426 : resultRelInfo);
2427 :
2428 : /* Fetch the most recent version of old tuple. */
2429 100 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2430 100 : if (!table_tuple_fetch_row_version(resultRelationDesc,
2431 : tupleid,
2432 : SnapshotAny,
2433 : oldSlot))
2434 0 : elog(ERROR, "failed to fetch tuple being updated");
2435 100 : slot = ExecGetUpdateNewTuple(resultRelInfo,
2436 : epqslot, oldSlot);
2437 100 : goto redo_act;
2438 :
2439 2 : case TM_Deleted:
2440 : /* tuple already deleted; nothing to do */
2441 2 : return NULL;
2442 :
2443 8 : case TM_SelfModified:
2444 :
2445 : /*
2446 : * This can be reached when following an update
2447 : * chain from a tuple updated by another session,
2448 : * reaching a tuple that was already updated in
2449 : * this transaction. If previously modified by
2450 : * this command, ignore the redundant update,
2451 : * otherwise error out.
2452 : *
2453 : * See also TM_SelfModified response to
2454 : * table_tuple_update() above.
2455 : */
2456 8 : if (context->tmfd.cmax != estate->es_output_cid)
2457 2 : ereport(ERROR,
2458 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2459 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2460 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2461 6 : return NULL;
2462 :
2463 0 : default:
2464 : /* see table_tuple_lock call in ExecDelete() */
2465 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2466 : result);
2467 : return NULL;
2468 : }
2469 : }
2470 :
2471 : break;
2472 :
2473 8 : case TM_Deleted:
2474 8 : if (IsolationUsesXactSnapshot())
2475 0 : ereport(ERROR,
2476 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2477 : errmsg("could not serialize access due to concurrent delete")));
2478 : /* tuple already deleted; nothing to do */
2479 8 : return NULL;
2480 :
2481 0 : default:
2482 0 : elog(ERROR, "unrecognized table_tuple_update status: %u",
2483 : result);
2484 : return NULL;
2485 : }
2486 : }
2487 :
2488 310528 : if (canSetTag)
2489 309936 : (estate->es_processed)++;
2490 :
2491 310528 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2492 : slot);
2493 :
2494 : /* Process RETURNING if present */
2495 310446 : if (resultRelInfo->ri_projectReturning)
2496 2156 : return ExecProcessReturning(resultRelInfo, slot, context->planSlot);
2497 :
2498 308290 : return NULL;
2499 : }
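 :
 : /*
 : * Note on the IsolationUsesXactSnapshot() checks above: under
 : * REPEATABLE READ or SERIALIZABLE, a concurrent-update conflict is
 : * reported instead of being retried via EvalPlanQual. Hypothetical
 : * session sketch:
 : *
 : * BEGIN ISOLATION LEVEL REPEATABLE READ;
 : * UPDATE t SET v = v + 1 WHERE id = 1;
 : * -- if another transaction committed an update to this row after our
 : * -- snapshot was taken:
 : * -- ERROR: could not serialize access due to concurrent update
 : */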
2500 :
2501 : /*
2502 : * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
2503 : *
2504 : * Try to lock tuple for update as part of speculative insertion. If
2505 : * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
2506 : * (but still lock row, even though it may not satisfy estate's
2507 : * snapshot).
2508 : *
2509 : * Returns true if we're done (with or without an update), or false if
2510 : * the caller must retry the INSERT from scratch.
2511 : */
2512 : static bool
2513 5200 : ExecOnConflictUpdate(ModifyTableContext *context,
2514 : ResultRelInfo *resultRelInfo,
2515 : ItemPointer conflictTid,
2516 : TupleTableSlot *excludedSlot,
2517 : bool canSetTag,
2518 : TupleTableSlot **returning)
2519 : {
2520 5200 : ModifyTableState *mtstate = context->mtstate;
2521 5200 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2522 5200 : Relation relation = resultRelInfo->ri_RelationDesc;
2523 5200 : ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
2524 5200 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
2525 : TM_FailureData tmfd;
2526 : LockTupleMode lockmode;
2527 : TM_Result test;
2528 : Datum xminDatum;
2529 : TransactionId xmin;
2530 : bool isnull;
2531 :
2532 : /* Determine lock mode to use */
2533 5200 : lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
2534 :
2535 : /*
2536 : * Lock tuple for update. Don't follow updates when tuple cannot be
2537 : * locked without doing so. A row locking conflict here means our
2538 : * previous conclusion that the tuple is conclusively committed is not
2539 : * true anymore.
2540 : */
2541 5200 : test = table_tuple_lock(relation, conflictTid,
2542 5200 : context->estate->es_snapshot,
2543 5200 : existing, context->estate->es_output_cid,
2544 : lockmode, LockWaitBlock, 0,
2545 : &tmfd);
2546 5200 : switch (test)
2547 : {
2548 5176 : case TM_Ok:
2549 : /* success! */
2550 5176 : break;
2551 :
2552 24 : case TM_Invisible:
2553 :
2554 : /*
2555 : * This can occur when a just inserted tuple is updated again in
2556 : * the same command. E.g. because multiple rows with the same
2557 : * conflicting key values are inserted.
2558 : *
2559 : * This is somewhat similar to the ExecUpdate() TM_SelfModified
2560 : * case. We do not want to proceed because it would lead to the
2561 : * same row being updated a second time in some unspecified order,
2562 : * and in contrast to plain UPDATEs there's no historical behavior
2563 : * to break.
2564 : *
2565 : * It is the user's responsibility to prevent this situation from
2566 : * occurring. These problems are why the SQL standard similarly
2567 : * specifies that for SQL MERGE, an exception must be raised in
2568 : * the event of an attempt to update the same row twice.
2569 : */
2570 24 : xminDatum = slot_getsysattr(existing,
2571 : MinTransactionIdAttributeNumber,
2572 : &isnull);
2573 : Assert(!isnull);
2574 24 : xmin = DatumGetTransactionId(xminDatum);
2575 :
2576 24 : if (TransactionIdIsCurrentTransactionId(xmin))
2577 24 : ereport(ERROR,
2578 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2579 : /* translator: %s is a SQL command name */
2580 : errmsg("%s command cannot affect row a second time",
2581 : "ON CONFLICT DO UPDATE"),
2582 : errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
2583 :
2584 : /* This shouldn't happen */
2585 0 : elog(ERROR, "attempted to lock invisible tuple");
2586 : break;
2587 :
2588 0 : case TM_SelfModified:
2589 :
2590 : /*
2591 : * This state should never be reached. As a dirty snapshot is used
2592 : * to find conflicting tuples, speculative insertion wouldn't have
2593 : * seen this row to conflict with.
2594 : */
2595 0 : elog(ERROR, "unexpected self-updated tuple");
2596 : break;
2597 :
2598 0 : case TM_Updated:
2599 0 : if (IsolationUsesXactSnapshot())
2600 0 : ereport(ERROR,
2601 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2602 : errmsg("could not serialize access due to concurrent update")));
2603 :
2604 : /*
2605 : * As long as we don't support an UPDATE of INSERT ON CONFLICT for
2606 : * a partitioned table, we shouldn't reach a case where the tuple to
2607 : * be locked has been moved to another partition due to a concurrent
2608 : * update of the partition key.
2609 : */
2610 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2611 :
2612 : /*
2613 : * Tell caller to try again from the very start.
2614 : *
2615 : * It does not make sense to use the usual EvalPlanQual() style
2616 : * loop here, as the new version of the row might not conflict
2617 : * anymore, or the conflicting tuple has actually been deleted.
2618 : */
2619 0 : ExecClearTuple(existing);
2620 0 : return false;
2621 :
2622 0 : case TM_Deleted:
2623 0 : if (IsolationUsesXactSnapshot())
2624 0 : ereport(ERROR,
2625 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2626 : errmsg("could not serialize access due to concurrent delete")));
2627 :
2628 : /* see TM_Updated case */
2629 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2630 0 : ExecClearTuple(existing);
2631 0 : return false;
2632 :
2633 0 : default:
2634 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
2635 : }
2636 :
2637 : /* Success, the tuple is locked. */
2638 :
2639 : /*
2640 : * Verify that the tuple is visible to our MVCC snapshot if the current
2641 : * isolation level mandates that.
2642 : *
2643 : * It's not sufficient to rely on the check within ExecUpdate() as e.g.
2644 : * CONFLICT ... WHERE clause may prevent us from reaching that.
2645 : *
2646 : * This means we only ever continue when a new command in the current
2647 : * transaction could see the row, even though in READ COMMITTED mode the
2648 : * tuple will not be visible according to the current statement's
2649 : * snapshot. This is in line with the way UPDATE deals with newer tuple
2650 : * versions.
2651 : */
2652 5176 : ExecCheckTupleVisible(context->estate, relation, existing);
2653 :
2654 : /*
2655 : * Make tuple and any needed join variables available to ExecQual and
2656 : * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
2657 : * the target's existing tuple is installed in the scantuple. EXCLUDED
2658 : * has been made to reference INNER_VAR in setrefs.c, but there is no
2659 : * other redirection.
2660 : */
2661 5176 : econtext->ecxt_scantuple = existing;
2662 5176 : econtext->ecxt_innertuple = excludedSlot;
2663 5176 : econtext->ecxt_outertuple = NULL;
2664 :
2665 5176 : if (!ExecQual(onConflictSetWhere, econtext))
2666 : {
2667 32 : ExecClearTuple(existing); /* see return below */
2668 32 : InstrCountFiltered1(&mtstate->ps, 1);
2669 32 : return true; /* done with the tuple */
2670 : }
2671 :
2672 5144 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2673 : {
2674 : /*
2675 : * Check target's existing tuple against UPDATE-applicable USING
2676 : * security barrier quals (if any), enforced here as RLS checks/WCOs.
2677 : *
2678 : * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
2679 : * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
2680 : * but that's almost the extent of its special handling for ON
2681 : * CONFLICT DO UPDATE.
2682 : *
2683 : * The rewriter will also have associated UPDATE applicable straight
2684 : * RLS checks/WCOs for the benefit of the ExecUpdate() call that
2685 : * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
2686 : * kinds, so there is no danger of spurious over-enforcement in the
2687 : * INSERT or UPDATE path.
2688 : */
2689 60 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
2690 : existing,
2691 : mtstate->ps.state);
2692 : }
2693 :
2694 : /* Project the new tuple version */
2695 5120 : ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
2696 :
2697 : /*
2698 : * Note that it is possible that the target tuple has been modified in
2699 : * this session, after the above table_tuple_lock. We choose to not error
2700 : * out in that case, in line with ExecUpdate's treatment of similar cases.
2701 : * This can happen if an UPDATE is triggered from within ExecQual(),
2702 : * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
2703 : * wCTE in the ON CONFLICT's SET.
2704 : */
2705 :
2706 : /* Execute UPDATE with projection */
2707 10210 : *returning = ExecUpdate(context, resultRelInfo,
2708 : conflictTid, NULL,
2709 5120 : resultRelInfo->ri_onConflict->oc_ProjSlot,
2710 : canSetTag);
2711 :
2712 : /*
2713 : * Clear out existing tuple, as there might not be another conflict among
2714 : * the next input rows. Don't want to hold resources till the end of the
2715 : * query.
2716 : */
2717 5090 : ExecClearTuple(existing);
2718 5090 : return true;
2719 : }
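 :
 : /*
 : * Statement shape handled above (hypothetical table "kv" with unique
 : * column "k"):
 : *
 : * INSERT INTO kv (k, v) VALUES (1, 'new')
 : * ON CONFLICT (k) DO UPDATE SET v = EXCLUDED.v
 : * WHERE kv.v IS DISTINCT FROM EXCLUDED.v;
 : *
 : * The existing row becomes the scan tuple, the proposed row is
 : * EXCLUDED (ecxt_innertuple), and the WHERE clause above is
 : * onConflictSetWhere. Two proposed rows with the same key in one
 : * command raise the cardinality-violation error handled earlier.
 : */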
2720 :
2721 : /*
2722 : * Perform MERGE.
2723 : */
2724 : static TupleTableSlot *
2725 13454 : ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2726 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
2727 : {
2728 13454 : TupleTableSlot *rslot = NULL;
2729 : bool matched;
2730 :
2731 : /*-----
2732 : * If we are dealing with a WHEN MATCHED case, tupleid or oldtuple is
2733 : * valid, depending on whether the result relation is a table or a view.
2734 : * We execute the first action for which the additional WHEN MATCHED AND
2735 : * quals pass. If an action without quals is found, that action is
2736 : * executed.
2737 : *
2738 : * Similarly, in the WHEN NOT MATCHED BY SOURCE case, tupleid or oldtuple
2739 : * is valid, and we look at the given WHEN NOT MATCHED BY SOURCE actions
2740 : * in sequence until one passes. This is almost identical to the WHEN
2741 : * MATCHED case, and both cases are handled by ExecMergeMatched().
2742 : *
2743 : * Finally, in the WHEN NOT MATCHED [BY TARGET] case, both tupleid and
2744 : * oldtuple are invalid, and we look at the given WHEN NOT MATCHED [BY
2745 : * TARGET] actions in sequence until one passes.
2746 : *
2747 : * Things get interesting in case of concurrent update/delete of the
2748 : * target tuple. Such concurrent update/delete is detected while we are
2749 : * executing a WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action.
2750 : *
2751 : * A concurrent update can:
2752 : *
2753 : * 1. modify the target tuple so that the results from checking any
2754 : * additional quals attached to WHEN MATCHED or WHEN NOT MATCHED BY
2755 : * SOURCE actions potentially change, but the result from the join
2756 : * quals does not change.
2757 : *
2758 : * In this case, we are still dealing with the same kind of match
2759 : * (MATCHED or NOT MATCHED BY SOURCE). We recheck the same list of
2760 : * actions from the start and choose the first one that satisfies the
2761 : * new target tuple.
2762 : *
2763 : * 2. modify the target tuple in the WHEN MATCHED case so that the join
2764 : * quals no longer pass and hence the source and target tuples no
2765 : * longer match.
2766 : *
2767 : * In this case, we are now dealing with a NOT MATCHED case, and we
2768 : * process both WHEN NOT MATCHED BY SOURCE and WHEN NOT MATCHED [BY
2769 : * TARGET] actions. First ExecMergeMatched() processes the list of
2770 : * WHEN NOT MATCHED BY SOURCE actions in sequence until one passes,
2771 : * then ExecMergeNotMatched() processes any WHEN NOT MATCHED [BY
2772 : * TARGET] actions in sequence until one passes. Thus we may execute
2773 : * two actions; one of each kind.
2774 : *
2775 : * Thus we support concurrent updates that turn MATCHED candidate rows
2776 : * into NOT MATCHED rows. However, we do not attempt to support cases
2777 : * that would turn NOT MATCHED rows into MATCHED rows, or which would
2778 : * cause a target row to match a different source row.
2779 : *
2780 : * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED
2781 : * [BY TARGET].
2782 : *
2783 : * ExecMergeMatched() takes care of following the update chain and
2784 : * re-finding the qualifying WHEN MATCHED or WHEN NOT MATCHED BY SOURCE
2785 : * action, as long as the target tuple still exists. If the target tuple
2786 : * gets deleted or a concurrent update causes the join quals to fail, it
2787 : * returns a matched status of false and we call ExecMergeNotMatched().
2788 : * Given that ExecMergeMatched() always makes progress by following the
2789 : * update chain and we never switch from ExecMergeNotMatched() to
2790 : * ExecMergeMatched(), there is no risk of a livelock.
2791 : */
2792 13454 : matched = tupleid != NULL || oldtuple != NULL;
2793 13454 : if (matched)
2794 10834 : rslot = ExecMergeMatched(context, resultRelInfo, tupleid, oldtuple,
2795 : canSetTag, &matched);
2796 :
2797 : /*
2798 : * Deal with the NOT MATCHED case (either a NOT MATCHED tuple from the
2799 : * join, or a previously MATCHED tuple for which ExecMergeMatched() set
2800 : * "matched" to false, indicating that it no longer matches).
2801 : */
2802 13364 : if (!matched)
2803 : {
2804 : /*
2805 : * If a concurrent update turned a MATCHED case into a NOT MATCHED
2806 : * case, and we have both WHEN NOT MATCHED BY SOURCE and WHEN NOT
2807 : * MATCHED [BY TARGET] actions, and there is a RETURNING clause,
2808 : * ExecMergeMatched() may have already executed a WHEN NOT MATCHED BY
2809 : * SOURCE action, and computed the row to return. If so, we cannot
2810 : * execute a WHEN NOT MATCHED [BY TARGET] action now, so mark it as
2811 : * pending (to be processed on the next call to ExecModifyTable()).
2812 : * Otherwise, just process the action now.
2813 : */
2814 2636 : if (rslot == NULL)
2815 2634 : rslot = ExecMergeNotMatched(context, resultRelInfo, canSetTag);
2816 : else
2817 2 : context->mtstate->mt_merge_pending_not_matched = context->planSlot;
2818 : }
2819 :
2820 13310 : return rslot;
2821 : }
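 :
 : /*
 : * Shape of a MERGE exercising all three action lists discussed above
 : * (hypothetical tables; WHEN NOT MATCHED BY SOURCE is PostgreSQL 17+):
 : *
 : * MERGE INTO tgt t
 : * USING src s ON t.k = s.k
 : * WHEN MATCHED THEN UPDATE SET v = s.v
 : * WHEN NOT MATCHED BY SOURCE THEN DELETE
 : * WHEN NOT MATCHED THEN INSERT (k, v) VALUES (s.k, s.v);
 : */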
2822 :
2823 : /*
2824 : * Check and execute the first qualifying MATCHED or NOT MATCHED BY SOURCE
2825 : * action, depending on whether the join quals are satisfied. If the target
2826 : * relation is a table, the current target tuple is identified by tupleid.
2827 : * Otherwise, if the target relation is a view, oldtuple is the current target
2828 : * tuple from the view.
2829 : *
2830 : * We start from the first WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action
2831 : * and check if the WHEN quals pass, if any. If the WHEN quals for the first
2832 : * action do not pass, we check the second, then the third and so on. If we
2833 : * reach the end without finding a qualifying action, we return NULL.
2834 : * Otherwise, we execute the qualifying action and return its RETURNING
2835 : * result, if any, or NULL.
2836 : *
2837 : * On entry, "*matched" is assumed to be true. If a concurrent update or
2838 : * delete is detected that causes the join quals to no longer pass, we set it
2839 : * to false, indicating that the caller should process any NOT MATCHED [BY
2840 : * TARGET] actions.
2841 : *
2842 : * After a concurrent update, we restart from the first action to look for a
2843 : * new qualifying action to execute. If the join quals originally passed, and
2844 : * the concurrent update caused them to no longer pass, then we switch from
2845 : * the MATCHED to the NOT MATCHED BY SOURCE list of actions before restarting
2846 : * (and setting "*matched" to false). As a result we may execute a WHEN NOT
2847 : * MATCHED BY SOURCE action, and set "*matched" to false, causing the caller
2848 : * to also execute a WHEN NOT MATCHED [BY TARGET] action.
2849 : */
2850 : static TupleTableSlot *
2851 10834 : ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2852 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag,
2853 : bool *matched)
2854 : {
2855 10834 : ModifyTableState *mtstate = context->mtstate;
2856 10834 : List **mergeActions = resultRelInfo->ri_MergeActions;
2857 : List *actionStates;
2858 10834 : TupleTableSlot *newslot = NULL;
2859 10834 : TupleTableSlot *rslot = NULL;
2860 10834 : EState *estate = context->estate;
2861 10834 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2862 : bool isNull;
2863 10834 : EPQState *epqstate = &mtstate->mt_epqstate;
2864 : ListCell *l;
2865 :
2866 : /* Expect matched to be true on entry */
2867 : Assert(*matched);
2868 :
2869 : /*
2870 : * If there are no WHEN MATCHED or WHEN NOT MATCHED BY SOURCE actions, we
2871 : * are done.
2872 : */
2873 10834 : if (mergeActions[MERGE_WHEN_MATCHED] == NIL &&
2874 1200 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] == NIL)
2875 528 : return NULL;
2876 :
2877 : /*
2878 : * Make tuple and any needed join variables available to ExecQual and
2879 : * ExecProject. The target's existing tuple is installed in the scantuple.
2880 : * This target relation's slot is required only in the case of a MATCHED
2881 : * or NOT MATCHED BY SOURCE tuple and UPDATE/DELETE actions.
2882 : */
2883 10306 : econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
2884 10306 : econtext->ecxt_innertuple = context->planSlot;
2885 10306 : econtext->ecxt_outertuple = NULL;
2886 :
2887 : /*
2888 : * This routine is only invoked for matched target rows, so we should
2889 : * either have the tupleid of the target row, or an old tuple from the
2890 : * target wholerow junk attr.
2891 : */
2892 : Assert(tupleid != NULL || oldtuple != NULL);
2893 10306 : if (oldtuple != NULL)
2894 96 : ExecForceStoreHeapTuple(oldtuple, resultRelInfo->ri_oldTupleSlot,
2895 : false);
2896 10210 : else if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
2897 : tupleid,
2898 : SnapshotAny,
2899 : resultRelInfo->ri_oldTupleSlot))
2900 0 : elog(ERROR, "failed to fetch the target tuple");
2901 :
2902 : /*
2903 : * Test the join condition. If it's satisfied, perform a MATCHED action.
2904 : * Otherwise, perform a NOT MATCHED BY SOURCE action.
2905 : *
2906 : * Note that this join condition will be NULL if there are no NOT MATCHED
2907 : * BY SOURCE actions --- see transform_MERGE_to_join(). In that case, we
2908 : * need only consider MATCHED actions here.
2909 : */
2910 10306 : if (ExecQual(resultRelInfo->ri_MergeJoinCondition, econtext))
2911 10136 : actionStates = mergeActions[MERGE_WHEN_MATCHED];
2912 : else
2913 170 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
2914 :
2915 10306 : lmerge_matched:
2916 :
2917 18252 : foreach(l, actionStates)
2918 : {
2919 10434 : MergeActionState *relaction = (MergeActionState *) lfirst(l);
2920 10434 : CmdType commandType = relaction->mas_action->commandType;
2921 : TM_Result result;
2922 10434 : UpdateContext updateCxt = {0};
2923 :
2924 : /*
2925 : * Test condition, if any.
2926 : *
2927 : * In the absence of any condition, we perform the action
2928 : * unconditionally (no need to check separately since ExecQual() will
2929 : * return true if there are no conditions to evaluate).
2930 : */
2931 10434 : if (!ExecQual(relaction->mas_whenqual, econtext))
2932 7882 : continue;
2933 :
2934 : /*
2935 : * Check if the existing target tuple meets the USING checks of
2936 : * UPDATE/DELETE RLS policies. If those checks fail, we throw an
2937 : * error.
2938 : *
2939 : * The WITH CHECK quals for UPDATE RLS policies are applied in
2940 : * ExecUpdateAct() and hence we need not do anything special to handle
2941 : * them.
2942 : *
2943 : * NOTE: We must do this after WHEN quals are evaluated, so that we
2944 : * check policies only when they matter.
2945 : */
2946 2552 : if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
2947 : {
2948 90 : ExecWithCheckOptions(commandType == CMD_UPDATE ?
2949 : WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
2950 : resultRelInfo,
2951 : resultRelInfo->ri_oldTupleSlot,
2952 90 : context->mtstate->ps.state);
2953 : }
2954 :
2955 : /* Perform stated action */
2956 2528 : switch (commandType)
2957 : {
2958 2122 : case CMD_UPDATE:
2959 :
2960 : /*
2961 : * Project the output tuple, and use that to update the table.
2962 : * We don't need to filter out junk attributes, because the
2963 : * UPDATE action's targetlist doesn't have any.
2964 : */
2965 2122 : newslot = ExecProject(relaction->mas_proj);
2966 :
2967 2122 : mtstate->mt_merge_action = relaction;
2968 2122 : if (!ExecUpdatePrologue(context, resultRelInfo,
2969 : tupleid, NULL, newslot, &result))
2970 : {
2971 18 : if (result == TM_Ok)
2972 156 : return NULL; /* "do nothing" */
2973 :
2974 12 : break; /* concurrent update/delete */
2975 : }
2976 :
2977 : /* INSTEAD OF ROW UPDATE Triggers */
2978 2104 : if (resultRelInfo->ri_TrigDesc &&
2979 334 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2980 : {
2981 78 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2982 : oldtuple, newslot))
2983 0 : return NULL; /* "do nothing" */
2984 : }
2985 : else
2986 : {
2987 : /* called table_tuple_fetch_row_version() above */
2988 : Assert(oldtuple == NULL);
2989 :
2990 2026 : result = ExecUpdateAct(context, resultRelInfo, tupleid,
2991 : NULL, newslot, canSetTag,
2992 : &updateCxt);
2993 :
2994 : /*
2995 : * As in ExecUpdate(), if ExecUpdateAct() reports that a
2996 : * cross-partition update was done, then there's nothing
2997 : * else for us to do --- the UPDATE has been turned into a
2998 : * DELETE and an INSERT, and we must not perform any of
2999 : * the usual post-update tasks. Also, the RETURNING tuple
3000 : * (if any) has been projected, so we can just return
3001 : * that.
3002 : */
3003 2006 : if (updateCxt.crossPartUpdate)
3004 : {
3005 134 : mtstate->mt_merge_updated += 1;
3006 134 : return context->cpUpdateReturningSlot;
3007 : }
3008 : }
3009 :
3010 1950 : if (result == TM_Ok)
3011 : {
3012 1878 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
3013 : tupleid, NULL, newslot);
3014 1866 : mtstate->mt_merge_updated += 1;
3015 : }
3016 1938 : break;
3017 :
3018 376 : case CMD_DELETE:
3019 376 : mtstate->mt_merge_action = relaction;
3020 376 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
3021 : NULL, NULL, &result))
3022 : {
3023 12 : if (result == TM_Ok)
3024 6 : return NULL; /* "do nothing" */
3025 :
3026 6 : break; /* concurrent update/delete */
3027 : }
3028 :
3029 : /* INSTEAD OF ROW DELETE Triggers */
3030 364 : if (resultRelInfo->ri_TrigDesc &&
3031 44 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
3032 : {
3033 6 : if (!ExecIRDeleteTriggers(estate, resultRelInfo,
3034 : oldtuple))
3035 0 : return NULL; /* "do nothing" */
3036 : }
3037 : else
3038 : {
3039 : /* called table_tuple_fetch_row_version() above */
3040 : Assert(oldtuple == NULL);
3041 :
3042 358 : result = ExecDeleteAct(context, resultRelInfo, tupleid,
3043 : false);
3044 : }
3045 :
3046 364 : if (result == TM_Ok)
3047 : {
3048 346 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
3049 : false);
3050 346 : mtstate->mt_merge_deleted += 1;
3051 : }
3052 364 : break;
3053 :
3054 30 : case CMD_NOTHING:
3055 : /* Doing nothing is always OK */
3056 30 : result = TM_Ok;
3057 30 : break;
3058 :
3059 0 : default:
3060 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3061 : }
3062 :
3063 2350 : switch (result)
3064 : {
3065 2242 : case TM_Ok:
3066 : /* all good; perform final actions */
3067 2242 : if (canSetTag && commandType != CMD_NOTHING)
3068 2194 : (estate->es_processed)++;
3069 :
3070 2242 : break;
3071 :
3072 32 : case TM_SelfModified:
3073 :
3074 : /*
3075 : * The target tuple was already updated or deleted by the
3076 : * current command, or by a later command in the current
3077 : * transaction. The former case is explicitly disallowed by
3078 : * the SQL standard for MERGE, which insists that the MERGE
3079 : * join condition should not join a target row to more than
3080 : * one source row.
3081 : *
3082 : * The latter case arises if the tuple is modified by a
3083 : * command in a BEFORE trigger, or perhaps by a command in a
3084 : * volatile function used in the query. In such situations we
3085 : * should not ignore the MERGE action, but it is equally
3086 : * unsafe to proceed. We don't want to discard the original
3087 : * MERGE action while keeping the triggered actions based on
3088 : * it; and it would be no better to allow the original MERGE
3089 : * action while discarding the updates that it triggered. So
3090 : * throwing an error is the only safe course.
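 : *
 : * As an illustration of the first case (hypothetical names), the
 : * command below joins one target row to two source rows, and the
 : * second attempted update draws the cardinality-violation error:
 : *
 : *   MERGE INTO tgt t
 : *   USING (VALUES (1, 'a'), (1, 'b')) AS s(id, val)
 : *   ON t.id = s.id
 : *   WHEN MATCHED THEN UPDATE SET val = s.val;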
3091 : */
3092 32 : if (context->tmfd.cmax != estate->es_output_cid)
3093 12 : ereport(ERROR,
3094 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3095 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3096 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3097 :
3098 20 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3099 20 : ereport(ERROR,
3100 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3101 : /* translator: %s is a SQL command name */
3102 : errmsg("%s command cannot affect row a second time",
3103 : "MERGE"),
3104 : errhint("Ensure that not more than one source row matches any one target row.")));
3105 :
3106 : /* This shouldn't happen */
3107 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3108 : break;
3109 :
3110 10 : case TM_Deleted:
3111 10 : if (IsolationUsesXactSnapshot())
3112 0 : ereport(ERROR,
3113 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3114 : errmsg("could not serialize access due to concurrent delete")));
3115 :
3116 : /*
3117 : * If the tuple was already deleted, set matched to false to
3118 : * let caller handle it under NOT MATCHED [BY TARGET] clauses.
3119 : */
3120 10 : *matched = false;
3121 10 : return NULL;
3122 :
3123 66 : case TM_Updated:
3124 : {
3125 : bool was_matched;
3126 : Relation resultRelationDesc;
3127 : TupleTableSlot *epqslot,
3128 : *inputslot;
3129 : LockTupleMode lockmode;
3130 :
3131 : /*
3132 : * The target tuple was concurrently updated by some other
3133 : * transaction. If we are currently processing a MATCHED
3134 : * action, use EvalPlanQual() with the new version of the
3135 : * tuple and recheck the join qual, to detect a change
3136 : * from the MATCHED to the NOT MATCHED cases. If we are
3137 : * already processing a NOT MATCHED BY SOURCE action, we
3138 : * skip this (cannot switch from NOT MATCHED BY SOURCE to
3139 : * MATCHED).
3140 : */
3141 66 : was_matched = relaction->mas_action->matchKind == MERGE_WHEN_MATCHED;
3142 66 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
3143 66 : lockmode = ExecUpdateLockMode(estate, resultRelInfo);
3144 :
3145 66 : if (was_matched)
3146 66 : inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
3147 : resultRelInfo->ri_RangeTableIndex);
3148 : else
3149 0 : inputslot = resultRelInfo->ri_oldTupleSlot;
3150 :
3151 66 : result = table_tuple_lock(resultRelationDesc, tupleid,
3152 : estate->es_snapshot,
3153 : inputslot, estate->es_output_cid,
3154 : lockmode, LockWaitBlock,
3155 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
3156 : &context->tmfd);
3157 66 : switch (result)
3158 : {
3159 64 : case TM_Ok:
3160 :
3161 : /*
3162 : * If the tuple was updated and migrated to
3163 : * another partition concurrently, the current
3164 : * MERGE implementation can't follow. There's
3165 : * probably a better way to handle this case, but
3166 : * it'd require recognizing the relation to which
3167 : * the tuple moved, and setting our current
3168 : * resultRelInfo to that.
3169 : */
3170 64 : if (ItemPointerIndicatesMovedPartitions(&context->tmfd.ctid))
3171 0 : ereport(ERROR,
3172 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3173 : errmsg("tuple to be merged was already moved to another partition due to concurrent update")));
3174 :
3175 : /*
3176 : * If this was a MATCHED case, use EvalPlanQual()
3177 : * to recheck the join condition.
3178 : */
3179 64 : if (was_matched)
3180 : {
3181 64 : epqslot = EvalPlanQual(epqstate,
3182 : resultRelationDesc,
3183 : resultRelInfo->ri_RangeTableIndex,
3184 : inputslot);
3185 :
3186 : /*
3187 : * If the subplan didn't return a tuple, then
3188 : * we must be dealing with an inner join for
3189 : * which the join condition no longer matches.
3190 : * This can only happen if there are no NOT
3191 : * MATCHED actions, and so there is nothing
3192 : * more to do.
3193 : */
3194 64 : if (TupIsNull(epqslot))
3195 0 : return NULL;
3196 :
3197 : /*
3198 : * If we got a NULL ctid from the subplan, the
3199 : * join quals no longer pass and we switch to
3200 : * the NOT MATCHED BY SOURCE case.
3201 : */
3202 64 : (void) ExecGetJunkAttribute(epqslot,
3203 64 : resultRelInfo->ri_RowIdAttNo,
3204 : &isNull);
3205 64 : if (isNull)
3206 4 : *matched = false;
3207 :
3208 : /*
3209 : * Otherwise, recheck the join quals to see if
3210 : * we need to switch to the NOT MATCHED BY
3211 : * SOURCE case.
3212 : */
3213 64 : if (!table_tuple_fetch_row_version(resultRelationDesc,
3214 : &context->tmfd.ctid,
3215 : SnapshotAny,
3216 : resultRelInfo->ri_oldTupleSlot))
3217 0 : elog(ERROR, "failed to fetch the target tuple");
3218 :
3219 64 : if (*matched)
3220 60 : *matched = ExecQual(resultRelInfo->ri_MergeJoinCondition,
3221 : econtext);
3222 :
3223 : /* Switch lists, if necessary */
3224 64 : if (!*matched)
3225 6 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3226 : }
3227 :
3228 : /*
3229 : * Loop back and process the MATCHED or NOT
3230 : * MATCHED BY SOURCE actions from the start.
3231 : */
3232 64 : goto lmerge_matched;
3233 :
3234 0 : case TM_Deleted:
3235 :
3236 : /*
3237 : * tuple already deleted; tell caller to run NOT
3238 : * MATCHED [BY TARGET] actions
3239 : */
3240 0 : *matched = false;
3241 0 : return NULL;
3242 :
3243 2 : case TM_SelfModified:
3244 :
3245 : /*
3246 : * This can be reached when following an update
3247 : * chain from a tuple updated by another session,
3248 : * reaching a tuple that was already updated or
3249 : * deleted by the current command, or by a later
3250 : * command in the current transaction. As above,
3251 : * this should always be treated as an error.
3252 : */
3253 2 : if (context->tmfd.cmax != estate->es_output_cid)
3254 0 : ereport(ERROR,
3255 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3256 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3257 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3258 :
3259 2 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3260 2 : ereport(ERROR,
3261 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3262 : /* translator: %s is a SQL command name */
3263 : errmsg("%s command cannot affect row a second time",
3264 : "MERGE"),
3265 : errhint("Ensure that not more than one source row matches any one target row.")));
3266 :
3267 : /* This shouldn't happen */
3268 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3269 : return NULL;
3270 :
3271 0 : default:
3272 : /* see table_tuple_lock call in ExecDelete() */
3273 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
3274 : result);
3275 : return NULL;
3276 : }
3277 : }
3278 :
3279 0 : case TM_Invisible:
3280 : case TM_WouldBlock:
3281 : case TM_BeingModified:
3282 : /* these should not occur */
3283 0 : elog(ERROR, "unexpected tuple operation result: %d", result);
3284 : break;
3285 : }
3286 :
3287 : /* Process RETURNING if present */
3288 2242 : if (resultRelInfo->ri_projectReturning)
3289 : {
3290 252 : switch (commandType)
3291 : {
3292 156 : case CMD_UPDATE:
3293 156 : rslot = ExecProcessReturning(resultRelInfo, newslot,
3294 : context->planSlot);
3295 156 : break;
3296 :
3297 96 : case CMD_DELETE:
3298 96 : rslot = ExecProcessReturning(resultRelInfo,
3299 : resultRelInfo->ri_oldTupleSlot,
3300 : context->planSlot);
3301 96 : break;
3302 :
3303 0 : case CMD_NOTHING:
3304 0 : break;
3305 :
3306 0 : default:
3307 0 : elog(ERROR, "unrecognized commandType: %d",
3308 : (int) commandType);
3309 : }
3310 : }
3311 :
3312 : /*
3313 : * We've activated one of the WHEN clauses, so we don't search
3314 : * further. This is required behaviour, not an optimization.
3315 : */
3316 2242 : break;
3317 : }
3318 :
3319 : /*
3320 : * Successfully executed an action or no qualifying action was found.
3321 : */
3322 10060 : return rslot;
3323 : }
3324 :
3325 : /*
3326 : * Execute the first qualifying NOT MATCHED [BY TARGET] action.
3327 : */
3328 : static TupleTableSlot *
3329 2636 : ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3330 : bool canSetTag)
3331 : {
3332 2636 : ModifyTableState *mtstate = context->mtstate;
3333 2636 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3334 : List *actionStates;
3335 2636 : TupleTableSlot *rslot = NULL;
3336 : ListCell *l;
3337 :
3338 : /*
3339 : * For INSERT actions, the root relation's merge action is OK since the
3340 : * INSERT's targetlist and the WHEN conditions can only refer to the
3341 : * source relation and hence it does not matter which result relation we
3342 : * work with.
3343 : *
3344 : * XXX does this mean that we can avoid creating copies of actionStates on
3345 : * partitioned tables, for not-matched actions?
3346 : */
3347 2636 : actionStates = resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET];
3348 :
3349 : /*
3350 : * Make source tuple available to ExecQual and ExecProject. We don't need
3351 : * the target tuple, since the WHEN quals and targetlist can't refer to
3352 : * the target columns.
3353 : */
3354 2636 : econtext->ecxt_scantuple = NULL;
3355 2636 : econtext->ecxt_innertuple = context->planSlot;
3356 2636 : econtext->ecxt_outertuple = NULL;
3357 :
3358 3506 : foreach(l, actionStates)
3359 : {
3360 2636 : MergeActionState *action = (MergeActionState *) lfirst(l);
3361 2636 : CmdType commandType = action->mas_action->commandType;
3362 : TupleTableSlot *newslot;
3363 :
3364 : /*
3365 : * Test condition, if any.
3366 : *
3367 : * In the absence of any condition, we perform the action
3368 : * unconditionally (no need to check separately since ExecQual() will
3369 : * return true if there are no conditions to evaluate).
3370 : */
3371 2636 : if (!ExecQual(action->mas_whenqual, econtext))
3372 870 : continue;
3373 :
3374 : /* Perform stated action */
3375 1766 : switch (commandType)
3376 : {
3377 1766 : case CMD_INSERT:
3378 :
3379 : /*
3380 : * Project the tuple. In case of a partitioned table, the
3381 : * projection was already built to use the root's descriptor,
3382 : * so we don't need to map the tuple here.
3383 : */
3384 1766 : newslot = ExecProject(action->mas_proj);
3385 1766 : mtstate->mt_merge_action = action;
3386 :
3387 1766 : rslot = ExecInsert(context, mtstate->rootResultRelInfo,
3388 : newslot, canSetTag, NULL, NULL);
3389 1712 : mtstate->mt_merge_inserted += 1;
3390 1712 : break;
3391 0 : case CMD_NOTHING:
3392 : /* Do nothing */
3393 0 : break;
3394 0 : default:
3395 0 : elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
3396 : }
3397 :
3398 : /*
3399 : * We've activated one of the WHEN clauses, so we don't search
3400 : * further. This is required behaviour, not an optimization.
3401 : */
3402 1712 : break;
3403 : }
3404 :
3405 2582 : return rslot;
3406 : }
3407 :
3408 : /*
3409 : * Initialize state for execution of MERGE.
3410 : */
3411 : void
3412 1404 : ExecInitMerge(ModifyTableState *mtstate, EState *estate)
3413 : {
3414 1404 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
3415 1404 : ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
3416 : ResultRelInfo *resultRelInfo;
3417 : ExprContext *econtext;
3418 : ListCell *lc;
3419 : int i;
3420 :
3421 1404 : if (node->mergeActionLists == NIL)
3422 0 : return;
3423 :
3424 1404 : mtstate->mt_merge_subcommands = 0;
3425 :
3426 1404 : if (mtstate->ps.ps_ExprContext == NULL)
3427 1242 : ExecAssignExprContext(estate, &mtstate->ps);
3428 1404 : econtext = mtstate->ps.ps_ExprContext;
3429 :
3430 : /*
3431 : * Create a MergeActionState for each action on the mergeActionList and
3432 : * add it to either a list of matched actions or not-matched actions.
3433 : *
3434 : * Similar logic appears in ExecInitPartitionInfo(), so if changing
3435 : * anything here, do so there too.
3436 : */
3437 1404 : i = 0;
3438 3040 : foreach(lc, node->mergeActionLists)
3439 : {
3440 1636 : List *mergeActionList = lfirst(lc);
3441 : Node *joinCondition;
3442 : TupleDesc relationDesc;
3443 : ListCell *l;
3444 :
3445 1636 : joinCondition = (Node *) list_nth(node->mergeJoinConditions, i);
3446 1636 : resultRelInfo = mtstate->resultRelInfo + i;
3447 1636 : i++;
3448 1636 : relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3449 :
3450 : /* initialize slots for MERGE fetches from this rel */
3451 1636 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3452 1636 : ExecInitMergeTupleSlots(mtstate, resultRelInfo);
3453 :
3454 : /* initialize state for join condition checking */
3455 1636 : resultRelInfo->ri_MergeJoinCondition =
3456 1636 : ExecInitQual((List *) joinCondition, &mtstate->ps);
3457 :
3458 4556 : foreach(l, mergeActionList)
3459 : {
3460 2920 : MergeAction *action = (MergeAction *) lfirst(l);
3461 : MergeActionState *action_state;
3462 : TupleTableSlot *tgtslot;
3463 : TupleDesc tgtdesc;
3464 :
3465 : /*
3466 : * Build action merge state for this rel. (For partitions,
3467 : * equivalent code exists in ExecInitPartitionInfo.)
3468 : */
3469 2920 : action_state = makeNode(MergeActionState);
3470 2920 : action_state->mas_action = action;
3471 2920 : action_state->mas_whenqual = ExecInitQual((List *) action->qual,
3472 : &mtstate->ps);
3473 :
3474 : /*
3475 : * We create three lists - one for each MergeMatchKind - and stick
3476 : * the MergeActionState into the appropriate list.
3477 : */
3478 5840 : resultRelInfo->ri_MergeActions[action->matchKind] =
3479 2920 : lappend(resultRelInfo->ri_MergeActions[action->matchKind],
3480 : action_state);
3481 :
3482 2920 : switch (action->commandType)
3483 : {
3484 964 : case CMD_INSERT:
3485 964 : ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
3486 : action->targetList);
3487 :
3488 : /*
3489 : * If the MERGE targets a partitioned table, any INSERT
3490 : * actions must be routed through it, not the child
3491 : * relations. Initialize the routing struct and the root
3492 : * table's "new" tuple slot for that, if not already done.
3493 : * The projection we prepare, for all relations, uses the
3494 : * root relation descriptor, and targets the plan's root
3495 : * slot. (This is consistent with the fact that we
3496 : * checked the plan output to match the root relation,
3497 : * above.)
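 : *
 : * For example (hypothetical names), if part_tgt is partitioned,
 : *
 : *   MERGE INTO part_tgt t USING src s ON t.id = s.id
 : *     WHEN NOT MATCHED THEN INSERT VALUES (s.id, s.val);
 : *
 : * builds its INSERT projection over part_tgt's root descriptor
 : * here, and ExecInsert() later routes each projected tuple to the
 : * correct partition.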
3498 : */
3499 964 : if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
3500 : RELKIND_PARTITIONED_TABLE)
3501 : {
3502 298 : if (mtstate->mt_partition_tuple_routing == NULL)
3503 : {
3504 : /*
3505 : * Initialize planstate for routing if not already
3506 : * done.
3507 : *
3508 : * Note that the slot is managed as a standalone
3509 : * slot belonging to ModifyTableState, so we pass
3510 : * NULL for the 2nd argument.
3511 : */
3512 124 : mtstate->mt_root_tuple_slot =
3513 124 : table_slot_create(rootRelInfo->ri_RelationDesc,
3514 : NULL);
3515 124 : mtstate->mt_partition_tuple_routing =
3516 124 : ExecSetupPartitionTupleRouting(estate,
3517 : rootRelInfo->ri_RelationDesc);
3518 : }
3519 298 : tgtslot = mtstate->mt_root_tuple_slot;
3520 298 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
3521 : }
3522 : else
3523 : {
3524 : /* not partitioned? use the stock relation and slot */
3525 666 : tgtslot = resultRelInfo->ri_newTupleSlot;
3526 666 : tgtdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3527 : }
3528 :
3529 964 : action_state->mas_proj =
3530 964 : ExecBuildProjectionInfo(action->targetList, econtext,
3531 : tgtslot,
3532 : &mtstate->ps,
3533 : tgtdesc);
3534 :
3535 964 : mtstate->mt_merge_subcommands |= MERGE_INSERT;
3536 964 : break;
3537 1494 : case CMD_UPDATE:
3538 1494 : action_state->mas_proj =
3539 1494 : ExecBuildUpdateProjection(action->targetList,
3540 : true,
3541 : action->updateColnos,
3542 : relationDesc,
3543 : econtext,
3544 : resultRelInfo->ri_newTupleSlot,
3545 : &mtstate->ps);
3546 1494 : mtstate->mt_merge_subcommands |= MERGE_UPDATE;
3547 1494 : break;
3548 404 : case CMD_DELETE:
3549 404 : mtstate->mt_merge_subcommands |= MERGE_DELETE;
3550 404 : break;
3551 58 : case CMD_NOTHING:
3552 58 : break;
3553 0 : default:
3554 0 : elog(ERROR, "unknown operation");
3555 : break;
3556 : }
3557 : }
3558 : }
3559 : }
3560 :
3561 : /*
3562 : * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
3563 : *
3564 : * We mark 'projectNewInfoValid' even though the projections themselves
3565 : * are not initialized here.
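 : *
 : * ri_oldTupleSlot receives the existing target tuple (fetched by
 : * ExecMergeMatched()), while ri_newTupleSlot receives projected new
 : * tuples (e.g., for UPDATE actions).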
3566 : */
3567 : void
3568 1654 : ExecInitMergeTupleSlots(ModifyTableState *mtstate,
3569 : ResultRelInfo *resultRelInfo)
3570 : {
3571 1654 : EState *estate = mtstate->ps.state;
3572 :
3573 : Assert(!resultRelInfo->ri_projectNewInfoValid);
3574 :
3575 1654 : resultRelInfo->ri_oldTupleSlot =
3576 1654 : table_slot_create(resultRelInfo->ri_RelationDesc,
3577 : &estate->es_tupleTable);
3578 1654 : resultRelInfo->ri_newTupleSlot =
3579 1654 : table_slot_create(resultRelInfo->ri_RelationDesc,
3580 : &estate->es_tupleTable);
3581 1654 : resultRelInfo->ri_projectNewInfoValid = true;
3582 1654 : }
3583 :
3584 : /*
3585 : * Process BEFORE EACH STATEMENT triggers
3586 : */
3587 : static void
3588 113996 : fireBSTriggers(ModifyTableState *node)
3589 : {
3590 113996 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
3591 113996 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3592 :
3593 113996 : switch (node->operation)
3594 : {
3595 88838 : case CMD_INSERT:
3596 88838 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3597 88826 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3598 828 : ExecBSUpdateTriggers(node->ps.state,
3599 : resultRelInfo);
3600 88826 : break;
3601 12006 : case CMD_UPDATE:
3602 12006 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3603 12006 : break;
3604 11856 : case CMD_DELETE:
3605 11856 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3606 11856 : break;
3607 1296 : case CMD_MERGE:
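 :
 : /*
 : * For MERGE, fire the statement triggers for every action type
 : * present in the command, whether or not any source row ends up
 : * running that action.
 : */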
3608 1296 : if (node->mt_merge_subcommands & MERGE_INSERT)
3609 718 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3610 1296 : if (node->mt_merge_subcommands & MERGE_UPDATE)
3611 914 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3612 1296 : if (node->mt_merge_subcommands & MERGE_DELETE)
3613 332 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3614 1296 : break;
3615 0 : default:
3616 0 : elog(ERROR, "unknown operation");
3617 : break;
3618 : }
3619 113984 : }
3620 :
3621 : /*
3622 : * Process AFTER EACH STATEMENT triggers
3623 : */
3624 : static void
3625 110996 : fireASTriggers(ModifyTableState *node)
3626 : {
3627 110996 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
3628 110996 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3629 :
3630 110996 : switch (node->operation)
3631 : {
3632 86698 : case CMD_INSERT:
3633 86698 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3634 726 : ExecASUpdateTriggers(node->ps.state,
3635 : resultRelInfo,
3636 726 : node->mt_oc_transition_capture);
3637 86698 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
3638 86698 : node->mt_transition_capture);
3639 86698 : break;
3640 11414 : case CMD_UPDATE:
3641 11414 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3642 11414 : node->mt_transition_capture);
3643 11414 : break;
3644 11732 : case CMD_DELETE:
3645 11732 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
3646 11732 : node->mt_transition_capture);
3647 11732 : break;
3648 1152 : case CMD_MERGE:
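 :
 : /*
 : * As in fireBSTriggers(), fire triggers for every action type
 : * present; note that the order here is the reverse of the BEFORE
 : * case.
 : */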
3649 1152 : if (node->mt_merge_subcommands & MERGE_DELETE)
3650 296 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
3651 296 : node->mt_transition_capture);
3652 1152 : if (node->mt_merge_subcommands & MERGE_UPDATE)
3653 818 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3654 818 : node->mt_transition_capture);
3655 1152 : if (node->mt_merge_subcommands & MERGE_INSERT)
3656 656 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
3657 656 : node->mt_transition_capture);
3658 1152 : break;
3659 0 : default:
3660 0 : elog(ERROR, "unknown operation");
3661 : break;
3662 : }
3663 110996 : }
3664 :
3665 : /*
3666 : * Set up the state needed for collecting transition tuples for AFTER
3667 : * triggers.
3668 : */
3669 : static void
3670 114294 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
3671 : {
3672 114294 : ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
3673 114294 : ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
3674 :
3675 : /* Check for transition tables on the directly targeted relation. */
3676 114294 : mtstate->mt_transition_capture =
3677 114294 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3678 114294 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3679 : mtstate->operation);
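 :
 : /*
 : * INSERT ... ON CONFLICT DO UPDATE can fire both INSERT and UPDATE
 : * transition-capturing triggers, so it needs a second capture state
 : * for the UPDATE part.
 : */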
3680 114294 : if (plan->operation == CMD_INSERT &&
3681 88840 : plan->onConflictAction == ONCONFLICT_UPDATE)
3682 828 : mtstate->mt_oc_transition_capture =
3683 828 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3684 828 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3685 : CMD_UPDATE);
3686 114294 : }
3687 :
3688 : /*
3689 : * ExecPrepareTupleRouting --- prepare for routing one tuple
3690 : *
3691 : * Determine the partition in which the tuple in slot is to be inserted,
3692 : * and return its ResultRelInfo in *partRelInfo. The return value is
3693 : * a slot holding the tuple of the partition rowtype.
3694 : *
3695 : * This also sets the transition table information in mtstate based on the
3696 : * selected partition.
3697 : */
3698 : static TupleTableSlot *
3699 721468 : ExecPrepareTupleRouting(ModifyTableState *mtstate,
3700 : EState *estate,
3701 : PartitionTupleRouting *proute,
3702 : ResultRelInfo *targetRelInfo,
3703 : TupleTableSlot *slot,
3704 : ResultRelInfo **partRelInfo)
3705 : {
3706 : ResultRelInfo *partrel;
3707 : TupleConversionMap *map;
3708 :
3709 : /*
3710 : * Lookup the target partition's ResultRelInfo. If ExecFindPartition does
3711 : * not find a valid partition for the tuple in 'slot' then an error is
3712 : * raised. An error may also be raised if the found partition is not a
3713 : * valid target for INSERTs. This is required since a partitioned table
3714 : * UPDATE to another partition becomes a DELETE+INSERT.
3715 : */
3716 721468 : partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
3717 :
3718 : /*
3719 : * If we're capturing transition tuples, we might need to convert from the
3720 : * partition rowtype to the root partitioned table's rowtype. But if there
3721 : * are no BEFORE triggers on the partition that could change the tuple, we
3722 : * can just remember the original unconverted tuple to avoid a needless
3723 : * round trip conversion.
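 : * (The trigger code uses tcs_original_insert_tuple directly when it
 : * is set, and falls back to converting the partition tuple when it
 : * is left as NULL.)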
3724 : */
3725 721264 : if (mtstate->mt_transition_capture != NULL)
3726 : {
3727 : bool has_before_insert_row_trig;
3728 :
3729 168 : has_before_insert_row_trig = (partrel->ri_TrigDesc &&
3730 42 : partrel->ri_TrigDesc->trig_insert_before_row);
3731 :
3732 126 : mtstate->mt_transition_capture->tcs_original_insert_tuple =
3733 126 : !has_before_insert_row_trig ? slot : NULL;
3734 : }
3735 :
3736 : /*
3737 : * Convert the tuple, if necessary.
3738 : */
3739 721264 : map = ExecGetRootToChildMap(partrel, estate);
3740 721264 : if (map != NULL)
3741 : {
3742 68406 : TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
3743 :
3744 68406 : slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
3745 : }
3746 :
3747 721264 : *partRelInfo = partrel;
3748 721264 : return slot;
3749 : }
3750 :
3751 : /* ----------------------------------------------------------------
3752 : * ExecModifyTable
3753 : *
3754 : * Perform table modifications as required, and return RETURNING results
3755 : * if needed.
3756 : * ----------------------------------------------------------------
3757 : */
3758 : static TupleTableSlot *
3759 122194 : ExecModifyTable(PlanState *pstate)
3760 : {
3761 122194 : ModifyTableState *node = castNode(ModifyTableState, pstate);
3762 : ModifyTableContext context;
3763 122194 : EState *estate = node->ps.state;
3764 122194 : CmdType operation = node->operation;
3765 : ResultRelInfo *resultRelInfo;
3766 : PlanState *subplanstate;
3767 : TupleTableSlot *slot;
3768 : TupleTableSlot *oldSlot;
3769 : ItemPointerData tuple_ctid;
3770 : HeapTupleData oldtupdata;
3771 : HeapTuple oldtuple;
3772 : ItemPointer tupleid;
3773 :
3774 122194 : CHECK_FOR_INTERRUPTS();
3775 :
3776 : /*
3777 : * This should NOT get called during EvalPlanQual; we should have passed a
3778 : * subplan tree to EvalPlanQual, instead. Use a runtime test not just
3779 : * Assert because this condition is easy to miss in testing. (Note:
3780 : * although ModifyTable should not get executed within an EvalPlanQual
3781 : * operation, we do have to allow it to be initialized and shut down in
3782 : * case it is within a CTE subplan. Hence this test must be here, not in
3783 : * ExecInitModifyTable.)
3784 : */
3785 122194 : if (estate->es_epq_active != NULL)
3786 0 : elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
3787 :
3788 : /*
3789 : * If we've already completed processing, don't try to do more. We need
3790 : * this test because ExecPostprocessPlan might call us an extra time, and
3791 : * our subplan's nodes aren't necessarily robust against being called
3792 : * extra times.
3793 : */
3794 122194 : if (node->mt_done)
3795 782 : return NULL;
3796 :
3797 : /*
3798 : * On first call, fire BEFORE STATEMENT triggers before proceeding.
3799 : */
3800 121412 : if (node->fireBSTriggers)
3801 : {
3802 113996 : fireBSTriggers(node);
3803 113984 : node->fireBSTriggers = false;
3804 : }
3805 :
3806 : /* Preload local variables */
3807 121400 : resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
3808 121400 : subplanstate = outerPlanState(node);
3809 :
3810 : /* Set global context */
3811 121400 : context.mtstate = node;
3812 121400 : context.epqstate = &node->mt_epqstate;
3813 121400 : context.estate = estate;
3814 :
3815 : /*
3816 : * Fetch rows from subplan, and execute the required table modification
3817 : * for each row.
3818 : */
3819 : for (;;)
3820 : {
3821 : /*
3822 : * Reset the per-output-tuple exprcontext. This is needed because
3823 : * triggers expect to use that context as workspace. It's a bit ugly
3824 : * to do this below the top level of the plan, however. We might need
3825 : * to rethink this later.
3826 : */
3827 13164286 : ResetPerTupleExprContext(estate);
3828 :
3829 : /*
3830 : * Reset per-tuple memory context used for processing on conflict and
3831 : * returning clauses, to free any expression evaluation storage
3832 : * allocated in the previous cycle.
3833 : */
3834 13164286 : if (pstate->ps_ExprContext)
3835 339118 : ResetExprContext(pstate->ps_ExprContext);
3836 :
3837 : /*
3838 : * If there is a pending MERGE ... WHEN NOT MATCHED [BY TARGET] action
3839 : * to execute, do so now --- see the comments in ExecMerge().
3840 : */
3841 13164286 : if (node->mt_merge_pending_not_matched != NULL)
3842 : {
3843 2 : context.planSlot = node->mt_merge_pending_not_matched;
3844 :
3845 2 : slot = ExecMergeNotMatched(&context, node->resultRelInfo,
3846 2 : node->canSetTag);
3847 :
3848 : /* Clear the pending action */
3849 2 : node->mt_merge_pending_not_matched = NULL;
3850 :
3851 : /*
3852 : * If we got a RETURNING result, return it to the caller. We'll
3853 : * continue the work on next call.
3854 : */
3855 2 : if (slot)
3856 2 : return slot;
3857 :
3858 0 : continue; /* continue with the next tuple */
3859 : }
3860 :
3861 : /* Fetch the next row from subplan */
3862 13164284 : context.planSlot = ExecProcNode(subplanstate);
3863 :
3864 : /* No more tuples to process? */
3865 13163884 : if (TupIsNull(context.planSlot))
3866 : break;
3867 :
3868 : /*
3869 : * When there are multiple result relations, each tuple contains a
3870 : * junk column that gives the OID of the rel from which it came.
3871 : * Extract it and select the correct result relation.
3872 : */
3873 13052888 : if (AttributeNumberIsValid(node->mt_resultOidAttno))
3874 : {
3875 : Datum datum;
3876 : bool isNull;
3877 : Oid resultoid;
3878 :
3879 4740 : datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
3880 : &isNull);
3881 4740 : if (isNull)
3882 : {
3883 : /*
3884 : * For commands other than MERGE, any tuples having InvalidOid
3885 : * for tableoid are errors. For MERGE, we may need to handle
3886 : * them as WHEN NOT MATCHED clauses if any, so do that.
3887 : *
3888 : * Note that we use the node's toplevel resultRelInfo, not any
3889 : * specific partition's.
3890 : */
3891 466 : if (operation == CMD_MERGE)
3892 : {
3893 466 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
3894 :
3895 466 : slot = ExecMerge(&context, node->resultRelInfo,
3896 466 : NULL, NULL, node->canSetTag);
3897 :
3898 : /*
3899 : * If we got a RETURNING result, return it to the caller.
3900 : * We'll continue the work on next call.
3901 : */
3902 460 : if (slot)
3903 20 : return slot;
3904 :
3905 440 : continue; /* continue with the next tuple */
3906 : }
3907 :
3908 0 : elog(ERROR, "tableoid is NULL");
3909 : }
3910 4274 : resultoid = DatumGetObjectId(datum);
3911 :
3912 : /* If it's not the same as last time, we need to locate the rel */
3913 4274 : if (resultoid != node->mt_lastResultOid)
3914 2878 : resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
3915 : false, true);
3916 : }
3917 :
3918 : /*
3919 : * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
3920 : * here is compute the RETURNING expressions.
3921 : */
3922 13052422 : if (resultRelInfo->ri_usesFdwDirectModify)
3923 : {
3924 : Assert(resultRelInfo->ri_projectReturning);
3925 :
3926 : /*
3927 : * A scan slot containing the data that was actually inserted,
3928 : * updated or deleted has already been made available to
3929 : * ExecProcessReturning by IterateDirectModify, so no need to
3930 : * provide it here.
3931 : */
3932 694 : slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot);
3933 :
3934 694 : return slot;
3935 : }
3936 :
3937 13051728 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
3938 13051728 : slot = context.planSlot;
3939 :
3940 13051728 : tupleid = NULL;
3941 13051728 : oldtuple = NULL;
3942 :
3943 : /*
3944 : * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
3945 : * to be updated/deleted/merged. For a heap relation, that's a TID;
3946 : * otherwise we may have a wholerow junk attr that carries the old
3947 : * tuple in toto. Keep this in step with the part of
3948 : * ExecInitModifyTable that sets up ri_RowIdAttNo.
3949 : */
3950 13051728 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
3951 : operation == CMD_MERGE)
3952 : {
3953 : char relkind;
3954 : Datum datum;
3955 : bool isNull;
3956 :
3957 1844316 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
3958 1844316 : if (relkind == RELKIND_RELATION ||
3959 502 : relkind == RELKIND_MATVIEW ||
3960 : relkind == RELKIND_PARTITIONED_TABLE)
3961 : {
3962 : /* ri_RowIdAttNo refers to a ctid attribute */
3963 : Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo));
3964 1843820 : datum = ExecGetJunkAttribute(slot,
3965 1843820 : resultRelInfo->ri_RowIdAttNo,
3966 : &isNull);
3967 :
3968 : /*
3969 : * For commands other than MERGE, any tuples having a null row
3970 : * identifier are errors. For MERGE, we may need to handle
3971 : * them as WHEN NOT MATCHED clauses if any, so do that.
3972 : *
3973 : * Note that we use the node's toplevel resultRelInfo, not any
3974 : * specific partition's.
3975 : */
3976 1843820 : if (isNull)
3977 : {
3978 2106 : if (operation == CMD_MERGE)
3979 : {
3980 2106 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
3981 :
3982 2106 : slot = ExecMerge(&context, node->resultRelInfo,
3983 2106 : NULL, NULL, node->canSetTag);
3984 :
3985 : /*
3986 : * If we got a RETURNING result, return it to the
3987 : * caller. We'll continue the work on next call.
3988 : */
3989 2064 : if (slot)
3990 104 : return slot;
3991 :
3992 2002 : continue; /* continue with the next tuple */
3993 : }
3994 :
3995 0 : elog(ERROR, "ctid is NULL");
3996 : }
3997 :
3998 1841714 : tupleid = (ItemPointer) DatumGetPointer(datum);
3999 1841714 : tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
4000 1841714 : tupleid = &tuple_ctid;
4001 : }
4002 :
4003 : /*
4004 : * Use the wholerow attribute, when available, to reconstruct the
4005 : * old relation tuple. The old tuple serves one or both of two
4006 : * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
4007 : * provides values for any unchanged columns for the NEW tuple of
4008 : * an UPDATE, because the subplan does not produce all the columns
4009 : * of the target table.
4010 : *
4011 : * Note that the wholerow attribute does not carry system columns,
4012 : * so foreign table triggers miss seeing those, except that we
4013 : * know enough here to set t_tableOid. Quite separately from
4014 : * this, the FDW may fetch its own junk attrs to identify the row.
4015 : *
4016 : * Other relevant relkinds, currently limited to views, always
4017 : * have a wholerow attribute.
4018 : */
4019 496 : else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4020 : {
4021 478 : datum = ExecGetJunkAttribute(slot,
4022 478 : resultRelInfo->ri_RowIdAttNo,
4023 : &isNull);
4024 :
4025 : /*
4026 : * For commands other than MERGE, any tuples having a null row
4027 : * identifier are errors. For MERGE, we may need to handle
4028 : * them as WHEN NOT MATCHED clauses if any, so do that.
4029 : *
4030 : * Note that we use the node's toplevel resultRelInfo, not any
4031 : * specific partition's.
4032 : */
4033 478 : if (isNull)
4034 : {
4035 48 : if (operation == CMD_MERGE)
4036 : {
4037 48 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4038 :
4039 48 : slot = ExecMerge(&context, node->resultRelInfo,
4040 48 : NULL, NULL, node->canSetTag);
4041 :
4042 : /*
4043 : * If we got a RETURNING result, return it to the
4044 : * caller. We'll continue the work on next call.
4045 : */
4046 42 : if (slot)
4047 12 : return slot;
4048 :
4049 30 : continue; /* continue with the next tuple */
4050 : }
4051 :
4052 0 : elog(ERROR, "wholerow is NULL");
4053 : }
4054 :
4055 430 : oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
4056 430 : oldtupdata.t_len =
4057 430 : HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
4058 430 : ItemPointerSetInvalid(&(oldtupdata.t_self));
4059 : /* Historically, view triggers see invalid t_tableOid. */
4060 430 : oldtupdata.t_tableOid =
4061 430 : (relkind == RELKIND_VIEW) ? InvalidOid :
4062 166 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
4063 :
4064 430 : oldtuple = &oldtupdata;
4065 : }
4066 : else
4067 : {
4068 : /* Only foreign tables are allowed to omit a row-ID attr */
4069 : Assert(relkind == RELKIND_FOREIGN_TABLE);
4070 : }
4071 : }
4072 :
4073 13049574 : switch (operation)
4074 : {
4075 11207412 : case CMD_INSERT:
4076 : /* Initialize projection info if first time for this table */
4077 11207412 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4078 87754 : ExecInitInsertProjection(node, resultRelInfo);
4079 11207412 : slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
4080 11207412 : slot = ExecInsert(&context, resultRelInfo, slot,
4081 11207412 : node->canSetTag, NULL, NULL);
4082 11205464 : break;
4083 :
4084 306704 : case CMD_UPDATE:
4085 : /* Initialize projection info if first time for this table */
4086 306704 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4087 11746 : ExecInitUpdateProjection(node, resultRelInfo);
4088 :
4089 : /*
4090 : * Make the new tuple by combining plan's output tuple with
4091 : * the old tuple being updated.
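 : * The subplan emits only the changed columns plus row-identity junk
 : * columns, so values for all unchanged columns must come from the
 : * old tuple in oldSlot.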
4092 : */
4093 306704 : oldSlot = resultRelInfo->ri_oldTupleSlot;
4094 306704 : if (oldtuple != NULL)
4095 : {
4096 : /* Use the wholerow junk attr as the old tuple. */
4097 262 : ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
4098 : }
4099 : else
4100 : {
4101 : /* Fetch the most recent version of old tuple. */
4102 306442 : Relation relation = resultRelInfo->ri_RelationDesc;
4103 :
4104 306442 : if (!table_tuple_fetch_row_version(relation, tupleid,
4105 : SnapshotAny,
4106 : oldSlot))
4107 0 : elog(ERROR, "failed to fetch tuple being updated");
4108 : }
4109 306704 : slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
4110 : oldSlot);
4111 :
4112 : /* Now apply the update. */
4113 306704 : slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
4114 306704 : slot, node->canSetTag);
4115 306302 : break;
4116 :
4117 1524624 : case CMD_DELETE:
4118 1524624 : slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
4119 1524624 : true, false, node->canSetTag, NULL, NULL, NULL);
4120 1524542 : break;
4121 :
4122 10834 : case CMD_MERGE:
4123 10834 : slot = ExecMerge(&context, resultRelInfo, tupleid, oldtuple,
4124 10834 : node->canSetTag);
4125 10744 : break;
4126 :
4127 0 : default:
4128 0 : elog(ERROR, "unknown operation");
4129 : break;
4130 : }
4131 :
4132 : /*
4133 : * If we got a RETURNING result, return it to caller. We'll continue
4134 : * the work on next call.
4135 : */
4136 13047052 : if (slot)
4137 6608 : return slot;
4138 : }
4139 :
4140 : /*
4141 : * Insert remaining tuples for batch insert.
4142 : */
4143 110996 : if (estate->es_insert_pending_result_relations != NIL)
4144 24 : ExecPendingInserts(estate);
4145 :
4146 : /*
4147 : * We're done, but fire AFTER STATEMENT triggers before exiting.
4148 : */
4149 110996 : fireASTriggers(node);
4150 :
4151 110996 : node->mt_done = true;
4152 :
4153 110996 : return NULL;
4154 : }
4155 :
4156 : /*
4157 : * ExecLookupResultRelByOid
4158 : * If the table with given OID is among the result relations to be
4159 : * updated by the given ModifyTable node, return its ResultRelInfo.
4160 : *
4161 : * If not found, return NULL if missing_ok, else raise error.
4162 : *
4163 : * If update_cache is true, then upon successful lookup, update the node's
4164 : * one-element cache. ONLY ExecModifyTable may pass true for this.
4165 : */
4166 : ResultRelInfo *
4167 11164 : ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid,
4168 : bool missing_ok, bool update_cache)
4169 : {
4170 11164 : if (node->mt_resultOidHash)
4171 : {
4172 : /* Use the pre-built hash table to locate the rel */
4173 : MTTargetRelLookup *mtlookup;
4174 :
4175 : mtlookup = (MTTargetRelLookup *)
4176 0 : hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
4177 0 : if (mtlookup)
4178 : {
4179 0 : if (update_cache)
4180 : {
4181 0 : node->mt_lastResultOid = resultoid;
4182 0 : node->mt_lastResultIndex = mtlookup->relationIndex;
4183 : }
4184 0 : return node->resultRelInfo + mtlookup->relationIndex;
4185 : }
4186 : }
4187 : else
4188 : {
4189 : /* With few target rels, just search the ResultRelInfo array */
4190 21470 : for (int ndx = 0; ndx < node->mt_nrels; ndx++)
4191 : {
4192 13666 : ResultRelInfo *rInfo = node->resultRelInfo + ndx;
4193 :
4194 13666 : if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
4195 : {
4196 3360 : if (update_cache)
4197 : {
4198 2878 : node->mt_lastResultOid = resultoid;
4199 2878 : node->mt_lastResultIndex = ndx;
4200 : }
4201 3360 : return rInfo;
4202 : }
4203 : }
4204 : }
4205 :
4206 7804 : if (!missing_ok)
4207 0 : elog(ERROR, "incorrect result relation OID %u", resultoid);
4208 7804 : return NULL;
4209 : }
4210 :
4211 : /* ----------------------------------------------------------------
4212 : * ExecInitModifyTable
4213 : * ----------------------------------------------------------------
4214 : */
4215 : ModifyTableState *
4216 115120 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
4217 : {
4218 : ModifyTableState *mtstate;
4219 115120 : Plan *subplan = outerPlan(node);
4220 115120 : CmdType operation = node->operation;
4221 115120 : int nrels = list_length(node->resultRelations);
4222 : ResultRelInfo *resultRelInfo;
4223 : List *arowmarks;
4224 : ListCell *l;
4225 : int i;
4226 : Relation rel;
4227 :
4228 : /* check for unsupported flags */
4229 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
4230 :
4231 : /*
4232 : * create state structure
4233 : */
4234 115120 : mtstate = makeNode(ModifyTableState);
4235 115120 : mtstate->ps.plan = (Plan *) node;
4236 115120 : mtstate->ps.state = estate;
4237 115120 : mtstate->ps.ExecProcNode = ExecModifyTable;
4238 :
4239 115120 : mtstate->operation = operation;
4240 115120 : mtstate->canSetTag = node->canSetTag;
4241 115120 : mtstate->mt_done = false;
4242 :
4243 115120 : mtstate->mt_nrels = nrels;
4244 115120 : mtstate->resultRelInfo = (ResultRelInfo *)
4245 115120 : palloc(nrels * sizeof(ResultRelInfo));
4246 :
4247 115120 : mtstate->mt_merge_pending_not_matched = NULL;
4248 115120 : mtstate->mt_merge_inserted = 0;
4249 115120 : mtstate->mt_merge_updated = 0;
4250 115120 : mtstate->mt_merge_deleted = 0;
4251 :
4252 : /*----------
4253 : * Resolve the target relation. This is the same as:
4254 : *
4255 : * - the relation for which we will fire FOR STATEMENT triggers,
4256 : * - the relation into whose tuple format all captured transition tuples
4257 : * must be converted, and
4258 : * - the root partitioned table used for tuple routing.
4259 : *
4260 : * If it's a partitioned or inherited table, the root partition or
4261 : * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
4262 : * given explicitly in node->rootRelation. Otherwise, the target relation
4263 : * is the sole relation in the node->resultRelations list.
4264 : *----------
4265 : */
4266 115120 : if (node->rootRelation > 0)
4267 : {
4268 2426 : mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
4269 2426 : ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
4270 : node->rootRelation);
4271 : }
4272 : else
4273 : {
4274 : Assert(list_length(node->resultRelations) == 1);
4275 112694 : mtstate->rootResultRelInfo = mtstate->resultRelInfo;
4276 112694 : ExecInitResultRelation(estate, mtstate->resultRelInfo,
4277 112694 : linitial_int(node->resultRelations));
4278 : }
4279 :
4280 : /* set up epqstate with dummy subplan data for the moment */
4281 115120 : EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
4282 : node->epqParam, node->resultRelations);
4283 115120 : mtstate->fireBSTriggers = true;
4284 :
4285 : /*
4286 : * Build state for collecting transition tuples. This requires having a
4287 : * valid trigger query context, so skip it in explain-only mode.
4288 : */
4289 115120 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
4290 114294 : ExecSetupTransitionCaptureState(mtstate, estate);
4291 :
4292 : /*
4293 : * Open all the result relations and initialize the ResultRelInfo structs.
4294 : * (But root relation was initialized above, if it's part of the array.)
4295 : * We must do this before initializing the subplan, because direct-modify
4296 : * FDWs expect their ResultRelInfos to be available.
4297 : */
4298 115120 : resultRelInfo = mtstate->resultRelInfo;
4299 115120 : i = 0;
4300 232286 : foreach(l, node->resultRelations)
4301 : {
4302 117440 : Index resultRelation = lfirst_int(l);
4303 117440 : List *mergeActions = NIL;
4304 :
4305 117440 : if (node->mergeActionLists)
4306 1636 : mergeActions = list_nth(node->mergeActionLists, i);
4307 :
4308 117440 : if (resultRelInfo != mtstate->rootResultRelInfo)
4309 : {
4310 4746 : ExecInitResultRelation(estate, resultRelInfo, resultRelation);
4311 :
4312 : /*
4313 : * For child result relations, store the root result relation
4314 : * pointer. We do so for the convenience of places that want to
4315 : * look at the query's original target relation but don't have the
4316 : * mtstate handy.
4317 : */
4318 4746 : resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
4319 : }
4320 :
4321 : /* Initialize the usesFdwDirectModify flag */
4322 117440 : resultRelInfo->ri_usesFdwDirectModify =
4323 117440 : bms_is_member(i, node->fdwDirectModifyPlans);
4324 :
4325 : /*
4326 : * Verify result relation is a valid target for the current operation
4327 : */
4328 117440 : CheckValidResultRel(resultRelInfo, operation, mergeActions);
4329 :
4330 117166 : resultRelInfo++;
4331 117166 : i++;
4332 : }
4333 :
4334 : /*
4335 : * Now we may initialize the subplan.
4336 : */
4337 114846 : outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
4338 :
4339 : /*
4340 : * Do additional per-result-relation initialization.
4341 : */
4342 231978 : for (i = 0; i < nrels; i++)
4343 : {
4344 117132 : resultRelInfo = &mtstate->resultRelInfo[i];
4345 :
4346 : /* Let FDWs init themselves for foreign-table result rels */
4347 117132 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4348 116924 : resultRelInfo->ri_FdwRoutine != NULL &&
4349 310 : resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
4350 : {
4351 310 : List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
4352 :
4353 310 : resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
4354 : resultRelInfo,
4355 : fdw_private,
4356 : i,
4357 : eflags);
4358 : }
4359 :
4360 : /*
4361 : * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
4362 : * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
4363 : * tables, the FDW might have created additional junk attr(s), but
4364 : * those are no concern of ours.
4365 : */
4366 117132 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4367 : operation == CMD_MERGE)
4368 : {
4369 : char relkind;
4370 :
4371 28062 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4372 28062 : if (relkind == RELKIND_RELATION ||
4373 650 : relkind == RELKIND_MATVIEW ||
4374 : relkind == RELKIND_PARTITIONED_TABLE)
4375 : {
4376 27448 : resultRelInfo->ri_RowIdAttNo =
4377 27448 : ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
4378 27448 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4379 0 : elog(ERROR, "could not find junk ctid column");
4380 : }
4381 614 : else if (relkind == RELKIND_FOREIGN_TABLE)
4382 : {
4383 : /*
4384 : * We don't support MERGE with foreign tables for now. (It's
4385 : * problematic because the implementation uses CTID.)
4386 : */
4387 : Assert(operation != CMD_MERGE);
4388 :
4389 : /*
4390 : * When there is a row-level trigger, there should be a
4391 : * wholerow attribute. We also require it to be present in
4392 : * UPDATE and MERGE, so we can get the values of unchanged
4393 : * columns.
4394 : */
4395 344 : resultRelInfo->ri_RowIdAttNo =
4396 344 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4397 : "wholerow");
4398 344 : if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
4399 194 : !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4400 0 : elog(ERROR, "could not find junk wholerow column");
4401 : }
4402 : else
4403 : {
4404 : /* Other valid target relkinds must provide wholerow */
4405 270 : resultRelInfo->ri_RowIdAttNo =
4406 270 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4407 : "wholerow");
4408 270 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4409 0 : elog(ERROR, "could not find junk wholerow column");
4410 : }
4411 : }
4412 : }
4413 :
4414 : /*
4415 : * If this is an inherited update/delete/merge, there will be a junk
4416 : * attribute named "tableoid" present in the subplan's targetlist. It
4417 : * will be used to identify the result relation for a given tuple to be
4418 : * updated/deleted/merged.
4419 : */
4420 114846 : mtstate->mt_resultOidAttno =
4421 114846 : ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
4422 : Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || nrels == 1);
4423 114846 : mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
4424 114846 : mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
4425 :
4426 : /* Get the root target relation */
4427 114846 : rel = mtstate->rootResultRelInfo->ri_RelationDesc;
4428 :
4429 : /*
4430 : * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
4431 : * or MERGE might need this too, but only if it actually moves tuples
4432 : * between partitions; in that case setup is done by
4433 : * ExecCrossPartitionUpdate.
4434 : */
4435 114846 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
4436 : operation == CMD_INSERT)
4437 5590 : mtstate->mt_partition_tuple_routing =
4438 5590 : ExecSetupPartitionTupleRouting(estate, rel);
4439 :
4440 : /*
4441 : * Initialize any WITH CHECK OPTION constraints if needed.
4442 : */
4443 114846 : resultRelInfo = mtstate->resultRelInfo;
4444 116180 : foreach(l, node->withCheckOptionLists)
4445 : {
4446 1334 : List *wcoList = (List *) lfirst(l);
4447 1334 : List *wcoExprs = NIL;
4448 : ListCell *ll;
4449 :
4450 3632 : foreach(ll, wcoList)
4451 : {
4452 2298 : WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
4453 2298 : ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
4454 : &mtstate->ps);
4455 :
4456 2298 : wcoExprs = lappend(wcoExprs, wcoExpr);
4457 : }
4458 :
4459 1334 : resultRelInfo->ri_WithCheckOptions = wcoList;
4460 1334 : resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
4461 1334 : resultRelInfo++;
4462 : }
4463 :
4464 : /*
4465 : * Initialize RETURNING projections if needed.
4466 : */
4467 114846 : if (node->returningLists)
4468 : {
4469 : TupleTableSlot *slot;
4470 : ExprContext *econtext;
4471 :
4472 : /*
4473 : * Initialize result tuple slot and assign its rowtype using the first
4474 : * RETURNING list. We assume the rest will look the same.
4475 : */
4476 4178 : mtstate->ps.plan->targetlist = (List *) linitial(node->returningLists);
4477 :
4478 : /* Set up a slot for the output of the RETURNING projection(s) */
4479 4178 : ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
4480 4178 : slot = mtstate->ps.ps_ResultTupleSlot;
4481 :
4482 : /* Need an econtext too */
4483 4178 : if (mtstate->ps.ps_ExprContext == NULL)
4484 4178 : ExecAssignExprContext(estate, &mtstate->ps);
4485 4178 : econtext = mtstate->ps.ps_ExprContext;
4486 :
4487 : /*
4488 : * Build a projection for each result rel.
4489 : */
4490 4178 : resultRelInfo = mtstate->resultRelInfo;
4491 8684 : foreach(l, node->returningLists)
4492 : {
4493 4506 : List *rlist = (List *) lfirst(l);
4494 :
4495 4506 : resultRelInfo->ri_returningList = rlist;
4496 4506 : resultRelInfo->ri_projectReturning =
4497 4506 : ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
4498 4506 : resultRelInfo->ri_RelationDesc->rd_att);
4499 4506 : resultRelInfo++;
4500 : }
4501 : }
4502 : else
4503 : {
4504 : /*
4505 : * We still must construct a dummy result tuple type, because InitPlan
4506 : * expects one (maybe should change that?).
4507 : */
4508 110668 : mtstate->ps.plan->targetlist = NIL;
4509 110668 : ExecInitResultTypeTL(&mtstate->ps);
4510 :
4511 110668 : mtstate->ps.ps_ExprContext = NULL;
4512 : }
4513 :
4514 : /* Set the list of arbiter indexes if needed for ON CONFLICT */
4515 114846 : resultRelInfo = mtstate->resultRelInfo;
4516 114846 : if (node->onConflictAction != ONCONFLICT_NONE)
4517 : {
4518 : /* insert may only have one relation, inheritance is not expanded */
4519 : Assert(nrels == 1);
4520 1200 : resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
4521 : }
4522 :
4523 : /*
4524 : * If needed, initialize the target list, projection and qual for ON CONFLICT
4525 : * DO UPDATE.
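 : *
 : * For example (hypothetical names), a command such as
 : *
 : *   INSERT INTO tab (id, val) VALUES (1, 'x')
 : *   ON CONFLICT (id) DO UPDATE SET val = EXCLUDED.val
 : *   WHERE tab.val IS DISTINCT FROM EXCLUDED.val;
 : *
 : * needs both a projection for the SET list and a qual for the WHERE
 : * clause, initialized below.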
4526 : */
4527 114846 : if (node->onConflictAction == ONCONFLICT_UPDATE)
4528 : {
4529 900 : OnConflictSetState *onconfl = makeNode(OnConflictSetState);
4530 : ExprContext *econtext;
4531 : TupleDesc relationDesc;
4532 :
4533 : /* already exists if created by RETURNING processing above */
4534 900 : if (mtstate->ps.ps_ExprContext == NULL)
4535 632 : ExecAssignExprContext(estate, &mtstate->ps);
4536 :
4537 900 : econtext = mtstate->ps.ps_ExprContext;
4538 900 : relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
4539 :
4540 : /* create state for DO UPDATE SET operation */
4541 900 : resultRelInfo->ri_onConflict = onconfl;
4542 :
4543 : /* initialize slot for the existing tuple */
4544 900 : onconfl->oc_Existing =
4545 900 : table_slot_create(resultRelInfo->ri_RelationDesc,
4546 900 : &mtstate->ps.state->es_tupleTable);
4547 :
4548 : /*
4549 : * Create the tuple slot for the UPDATE SET projection. We want a slot
4550 : * of the table's type here, because the slot will be used to insert
4551 : * into the table, and for RETURNING processing - which may access
4552 : * system attributes.
4553 : */
4554 900 : onconfl->oc_ProjSlot =
4555 900 : table_slot_create(resultRelInfo->ri_RelationDesc,
4556 900 : &mtstate->ps.state->es_tupleTable);
4557 :
4558 : /* build UPDATE SET projection state */
4559 900 : onconfl->oc_ProjInfo =
4560 900 : ExecBuildUpdateProjection(node->onConflictSet,
4561 : true,
4562 : node->onConflictCols,
4563 : relationDesc,
4564 : econtext,
4565 : onconfl->oc_ProjSlot,
4566 : &mtstate->ps);
4567 :
4568 : /* initialize state to evaluate the WHERE clause, if any */
4569 900 : if (node->onConflictWhere)
4570 : {
4571 : ExprState *qualexpr;
4572 :
4573 176 : qualexpr = ExecInitQual((List *) node->onConflictWhere,
4574 : &mtstate->ps);
4575 176 : onconfl->oc_WhereClause = qualexpr;
4576 : }
4577 : }
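:
: /*
: * A minimal sketch, simplified from ExecOnConflictUpdate, of how the
: * pieces initialized above are consumed at execution time, once the
: * conflicting tuple has been locked into onconfl->oc_Existing:
: *
: *	if (onconfl->oc_WhereClause == NULL ||
: *		ExecQual(onconfl->oc_WhereClause, econtext))
: *	{
: *		// compute the replacement tuple from the SET clause
: *		TupleTableSlot *newslot = ExecProject(onconfl->oc_ProjInfo);
: *		// ... hand newslot to the UPDATE machinery ...
: *	}
: */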
4578 :
4579 : /*
4580 : * If we have any secondary relations in an UPDATE or DELETE, they need to
4581 : * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
4582 : * EvalPlanQual mechanism needs to be told about them. This also goes for
4583 : * the source relations in a MERGE. Locate the relevant ExecRowMarks.
4584 : */
4585 114846 : arowmarks = NIL;
4586 117382 : foreach(l, node->rowMarks)
4587 : {
4588 2536 : PlanRowMark *rc = lfirst_node(PlanRowMark, l);
4589 : ExecRowMark *erm;
4590 : ExecAuxRowMark *aerm;
4591 :
4592 : /* ignore "parent" rowmarks; they are irrelevant at runtime */
4593 2536 : if (rc->isParent)
4594 100 : continue;
4595 :
4596 : /* Find ExecRowMark and build ExecAuxRowMark */
4597 2436 : erm = ExecFindRowMark(estate, rc->rti, false);
4598 2436 : aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
4599 2436 : arowmarks = lappend(arowmarks, aerm);
4600 : }
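:
: /*
: * Illustrative example (hypothetical tables t and s): in
: *
: *		UPDATE t SET x = s.x FROM s WHERE t.id = s.id;
: *
: * s is a source relation rather than a target, so an EvalPlanQual
: * recheck must be able to refetch its row; the ExecAuxRowMarks
: * collected above tell the EPQ machinery how to do that.
: */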
4601 :
4602 : /* For a MERGE command, initialize its state */
4603 114846 : if (mtstate->operation == CMD_MERGE)
4604 1404 : ExecInitMerge(mtstate, estate);
4605 :
4606 114846 : EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
4607 :
4608 : /*
4609 : * If there are a lot of result relations, use a hash table to speed up
4610 : * the lookups; if there are only a few, a simple linear search is faster.
4611 : *
4612 : * It's not clear where the threshold is, but try 64 for starters. In a
4613 : * debugging build, use a small threshold so that we get some test
4614 : * coverage of both code paths.
4615 : */
4616 : #ifdef USE_ASSERT_CHECKING
4617 : #define MT_NRELS_HASH 4
4618 : #else
4619 : #define MT_NRELS_HASH 64
4620 : #endif
4621 114846 : if (nrels >= MT_NRELS_HASH)
4622 : {
4623 : HASHCTL hash_ctl;
4624 :
4625 0 : hash_ctl.keysize = sizeof(Oid);
4626 0 : hash_ctl.entrysize = sizeof(MTTargetRelLookup);
4627 0 : hash_ctl.hcxt = CurrentMemoryContext;
4628 0 : mtstate->mt_resultOidHash =
4629 0 : hash_create("ModifyTable target hash",
4630 : nrels, &hash_ctl,
4631 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
4632 0 : for (i = 0; i < nrels; i++)
4633 : {
4634 : Oid hashkey;
4635 : MTTargetRelLookup *mtlookup;
4636 : bool found;
4637 :
4638 0 : resultRelInfo = &mtstate->resultRelInfo[i];
4639 0 : hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
4640 : mtlookup = (MTTargetRelLookup *)
4641 0 : hash_search(mtstate->mt_resultOidHash, &hashkey,
4642 : HASH_ENTER, &found);
4643 : Assert(!found);
4644 0 : mtlookup->relationIndex = i;
4645 : }
4646 : }
4647 : else
4648 114846 : mtstate->mt_resultOidHash = NULL;
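:
: /*
: * A minimal sketch, simplified from ExecLookupResultRelByOid, of the
: * lookup this hash table serves:
: *
: *	MTTargetRelLookup *mtlookup;
: *
: *	if (node->mt_resultOidHash)
: *	{
: *		mtlookup = hash_search(node->mt_resultOidHash, &resultoid,
: *							   HASH_FIND, NULL);
: *		if (mtlookup)
: *			return node->resultRelInfo + mtlookup->relationIndex;
: *	}
: *	else
: *	{
: *		// linear scan: compare RelationGetRelid() of each entry in
: *		// resultRelInfo[0 .. mt_nrels-1] against resultoid
: *	}
: */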
4649 :
4650 : /*
4651 : * Determine if the FDW supports batch insert and determine the batch size
4652 : * (an FDW may support batching, but it may be disabled for the
4653 : * server/table).
4654 : *
4655 : * We only do this for INSERT, so that for UPDATE/DELETE the batch size
4656 : * remains set to 0.
4657 : */
4658 114846 : if (operation == CMD_INSERT)
4659 : {
4660 : /* INSERT may have only one result relation; inheritance is not expanded */
4661 : Assert(nrels == 1);
4662 89070 : resultRelInfo = mtstate->resultRelInfo;
4663 89070 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4664 89070 : resultRelInfo->ri_FdwRoutine != NULL &&
4665 174 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
4666 174 : resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
4667 : {
4668 174 : resultRelInfo->ri_BatchSize =
4669 174 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
4670 174 : Assert(resultRelInfo->ri_BatchSize >= 1);
4671 : }
4672 : else
4673 88896 : resultRelInfo->ri_BatchSize = 1;
4674 : }
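:
: /*
: * Example: with postgres_fdw, the batch size reported by
: * GetForeignModifyBatchSize comes from the batch_size option, which can
: * be set per server or per foreign table (server name is hypothetical):
: *
: *		ALTER SERVER loopback OPTIONS (ADD batch_size '100');
: *
: * A reported size of 1 simply disables batching for the relation.
: */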
4675 :
4676 : /*
4677 : * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
4678 : * to estate->es_auxmodifytables so that it will be run to completion by
4679 : * ExecPostprocessPlan. (It'd actually work fine to add the primary
4680 : * ModifyTable node too, but there's no need.) Note the use of lcons not
4681 : * lappend: we need later-initialized ModifyTable nodes to be shut down
4682 : * before earlier ones. This ensures that we don't throw away RETURNING
4683 : * rows that need to be seen by a later CTE subplan.
4684 : */
4685 114846 : if (!mtstate->canSetTag)
4686 912 : estate->es_auxmodifytables = lcons(mtstate,
4687 : estate->es_auxmodifytables);
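:
: /*
: * Illustrative example (hypothetical tables src and dst): in
: *
: *		WITH moved AS (DELETE FROM src RETURNING *)
: *		INSERT INTO dst SELECT * FROM moved;
: *
: * the DELETE is a non-canSetTag ModifyTable; registering it here lets
: * ExecPostprocessPlan drive it to completion even if the INSERT does
: * not consume every RETURNING row.
: */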
4688 :
4689 114846 : return mtstate;
4690 : }
4691 :
4692 : /* ----------------------------------------------------------------
4693 : * ExecEndModifyTable
4694 : *
4695 : * Shuts down the plan.
4696 : *
4697 : * Returns nothing of interest.
4698 : * ----------------------------------------------------------------
4699 : */
4700 : void
4701 110960 : ExecEndModifyTable(ModifyTableState *node)
4702 : {
4703 : int i;
4704 :
4705 : /*
4706 : * Allow any FDWs to shut down
4707 : */
4708 223940 : for (i = 0; i < node->mt_nrels; i++)
4709 : {
4710 : int j;
4711 112980 : ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
4712 :
4713 112980 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4714 112788 : resultRelInfo->ri_FdwRoutine != NULL &&
4715 290 : resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
4716 290 : resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
4717 : resultRelInfo);
4718 :
4719 : /*
4720 : * Clean up the initialized batch slots. This only matters for FDWs
4721 : * with batching, but the other cases will have ri_NumSlotsInitialized
4722 : * == 0.
4723 : */
4724 113036 : for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
4725 : {
4726 56 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
4727 56 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
4728 : }
4729 : }
4730 :
4731 : /*
4732 : * Close all the partitioned tables, leaf partitions, and their indexes,
4733 : * and release the slot used for tuple routing, if set.
4734 : */
4735 110960 : if (node->mt_partition_tuple_routing)
4736 : {
4737 5608 : ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
4738 :
4739 5608 : if (node->mt_root_tuple_slot)
4740 542 : ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
4741 : }
4742 :
4743 : /*
4744 : * Terminate EPQ execution if active
4745 : */
4746 110960 : EvalPlanQualEnd(&node->mt_epqstate);
4747 :
4748 : /*
4749 : * Shut down the subplan.
4750 : */
4751 110960 : ExecEndNode(outerPlanState(node));
4752 110960 : }
4753 :
4754 : void
4755 0 : ExecReScanModifyTable(ModifyTableState *node)
4756 : {
4757 : /*
4758 : * Currently, we don't need to support rescan on ModifyTable nodes. The
4759 : * semantics of that would be a bit debatable anyway.
4760 : */
4761 0 : elog(ERROR, "ExecReScanModifyTable is not implemented");
4762 : }
|