Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeModifyTable.c
4 : * routines to handle ModifyTable nodes.
5 : *
6 : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/executor/nodeModifyTable.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : /* INTERFACE ROUTINES
16 : * ExecInitModifyTable - initialize the ModifyTable node
17 : * ExecModifyTable - retrieve the next tuple from the node
18 : * ExecEndModifyTable - shut down the ModifyTable node
19 : * ExecReScanModifyTable - rescan the ModifyTable node
20 : *
21 : * NOTES
22 : * The ModifyTable node receives input from its outerPlan, which is
23 : * the data to insert for INSERT cases, the changed columns' new
24 : * values plus row-locating info for UPDATE and MERGE cases, or just the
25 : * row-locating info for DELETE cases.
26 : *
27 : * The relation to modify can be an ordinary table, a foreign table, or a
28 : * view. If it's a view, either it has sufficient INSTEAD OF triggers or
29 : * this node executes only MERGE ... DO NOTHING. If the original MERGE
30 : * targeted a view not in one of those two categories, earlier processing
31 : * already pointed the ModifyTable result relation to an underlying
32 : * relation of that other view. This node does process
33 : * ri_WithCheckOptions, which may have expressions from those other,
34 : * automatically updatable views.
35 : *
36 : * MERGE runs a join between the source relation and the target table.
37 : * If any WHEN NOT MATCHED [BY TARGET] clauses are present, then the join
38 : * is an outer join that might output tuples without a matching target
39 : * tuple. In this case, any unmatched target tuples will have NULL
40 : * row-locating info, and only INSERT can be run. But for matched target
41 : * tuples, the row-locating info is used to determine the tuple to UPDATE
42 : * or DELETE. When all clauses are WHEN MATCHED or WHEN NOT MATCHED BY
43 : * SOURCE, all tuples produced by the join will include a matching target
44 : * tuple, so all tuples contain row-locating info.
45 : *
46 : * If the query specifies RETURNING, then the ModifyTable returns a
47 : * RETURNING tuple after completing each row insert, update, or delete.
48 : * It must be called again to continue the operation. Without RETURNING,
49 : * we just loop within the node until all the work is done, then
50 : * return NULL. This avoids useless call/return overhead.
51 : */
52 :
53 : #include "postgres.h"
54 :
55 : #include "access/htup_details.h"
56 : #include "access/tableam.h"
57 : #include "access/xact.h"
58 : #include "commands/trigger.h"
59 : #include "executor/execPartition.h"
60 : #include "executor/executor.h"
61 : #include "executor/nodeModifyTable.h"
62 : #include "foreign/fdwapi.h"
63 : #include "miscadmin.h"
64 : #include "nodes/nodeFuncs.h"
65 : #include "optimizer/optimizer.h"
66 : #include "rewrite/rewriteHandler.h"
67 : #include "storage/lmgr.h"
68 : #include "utils/builtins.h"
69 : #include "utils/datum.h"
70 : #include "utils/rel.h"
71 : #include "utils/snapmgr.h"
72 :
73 :
74 : typedef struct MTTargetRelLookup
75 : {
76 : Oid relationOid; /* hash key, must be first */
77 : int relationIndex; /* rel's index in resultRelInfo[] array */
78 : } MTTargetRelLookup;
79 :
80 : /*
81 : * Context struct for a ModifyTable operation, containing basic execution
82 : * state and some output variables populated by ExecUpdateAct() and
83 : * ExecDeleteAct() to report the result of their actions to callers.
84 : */
85 : typedef struct ModifyTableContext
86 : {
87 : /* Operation state */
88 : ModifyTableState *mtstate;
89 : EPQState *epqstate;
90 : EState *estate;
91 :
92 : /*
93 : * Slot containing tuple obtained from ModifyTable's subplan. Used to
94 : * access "junk" columns that are not going to be stored.
95 : */
96 : TupleTableSlot *planSlot;
97 :
98 : /*
99 : * Information about the changes that were made concurrently to a tuple
100 : * being updated or deleted
101 : */
102 : TM_FailureData tmfd;
103 :
104 : /*
105 : * The tuple projected by the INSERT's RETURNING clause, when doing a
106 : * cross-partition UPDATE
107 : */
108 : TupleTableSlot *cpUpdateReturningSlot;
109 : } ModifyTableContext;
110 :
111 : /*
112 : * Context struct containing output data specific to UPDATE operations.
113 : */
114 : typedef struct UpdateContext
115 : {
116 : bool crossPartUpdate; /* was it a cross-partition update? */
117 : TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
118 :
119 : /*
120 : * Lock mode to acquire on the latest tuple version before performing
121 : * EvalPlanQual on it
122 : */
123 : LockTupleMode lockmode;
124 : } UpdateContext;
125 :
126 :
127 : static void ExecBatchInsert(ModifyTableState *mtstate,
128 : ResultRelInfo *resultRelInfo,
129 : TupleTableSlot **slots,
130 : TupleTableSlot **planSlots,
131 : int numSlots,
132 : EState *estate,
133 : bool canSetTag);
134 : static void ExecPendingInserts(EState *estate);
135 : static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
136 : ResultRelInfo *sourcePartInfo,
137 : ResultRelInfo *destPartInfo,
138 : ItemPointer tupleid,
139 : TupleTableSlot *oldslot,
140 : TupleTableSlot *newslot);
141 : static bool ExecOnConflictUpdate(ModifyTableContext *context,
142 : ResultRelInfo *resultRelInfo,
143 : ItemPointer conflictTid,
144 : TupleTableSlot *excludedSlot,
145 : bool canSetTag,
146 : TupleTableSlot **returning);
147 : static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
148 : EState *estate,
149 : PartitionTupleRouting *proute,
150 : ResultRelInfo *targetRelInfo,
151 : TupleTableSlot *slot,
152 : ResultRelInfo **partRelInfo);
153 :
154 : static TupleTableSlot *ExecMerge(ModifyTableContext *context,
155 : ResultRelInfo *resultRelInfo,
156 : ItemPointer tupleid,
157 : HeapTuple oldtuple,
158 : bool canSetTag);
159 : static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
160 : static TupleTableSlot *ExecMergeMatched(ModifyTableContext *context,
161 : ResultRelInfo *resultRelInfo,
162 : ItemPointer tupleid,
163 : HeapTuple oldtuple,
164 : bool canSetTag,
165 : bool *matched);
166 : static TupleTableSlot *ExecMergeNotMatched(ModifyTableContext *context,
167 : ResultRelInfo *resultRelInfo,
168 : bool canSetTag);
169 :
170 :
171 : /*
172 : * Verify that the tuples to be produced by INSERT match the
173 : * target relation's rowtype
174 : *
175 : * We do this to guard against stale plans. If plan invalidation is
176 : * functioning properly then we should never get a failure here, but better
177 : * safe than sorry. Note that this is called after we have obtained lock
178 : * on the target rel, so the rowtype can't change underneath us.
179 : *
180 : * The plan output is represented by its targetlist, because that makes
181 : * handling the dropped-column case easier.
182 : *
183 : * We used to use this for UPDATE as well, but now the equivalent checks
184 : * are done in ExecBuildUpdateProjection.
185 : */
186 : static void
187 91796 : ExecCheckPlanOutput(Relation resultRel, List *targetList)
188 : {
189 91796 : TupleDesc resultDesc = RelationGetDescr(resultRel);
190 91796 : int attno = 0;
191 : ListCell *lc;
192 :
193 279926 : foreach(lc, targetList)
194 : {
195 188130 : TargetEntry *tle = (TargetEntry *) lfirst(lc);
196 : Form_pg_attribute attr;
197 :
198 : Assert(!tle->resjunk); /* caller removed junk items already */
199 :
200 188130 : if (attno >= resultDesc->natts)
201 0 : ereport(ERROR,
202 : (errcode(ERRCODE_DATATYPE_MISMATCH),
203 : errmsg("table row type and query-specified row type do not match"),
204 : errdetail("Query has too many columns.")));
205 188130 : attr = TupleDescAttr(resultDesc, attno);
206 188130 : attno++;
207 :
208 188130 : if (!attr->attisdropped)
209 : {
210 : /* Normal case: demand type match */
211 187514 : if (exprType((Node *) tle->expr) != attr->atttypid)
212 0 : ereport(ERROR,
213 : (errcode(ERRCODE_DATATYPE_MISMATCH),
214 : errmsg("table row type and query-specified row type do not match"),
215 : errdetail("Table has type %s at ordinal position %d, but query expects %s.",
216 : format_type_be(attr->atttypid),
217 : attno,
218 : format_type_be(exprType((Node *) tle->expr)))));
219 : }
220 : else
221 : {
222 : /*
223 : * For a dropped column, we can't check atttypid (it's likely 0).
224 : * In any case the planner has most likely inserted an INT4 null.
225 : * What we insist on is just *some* NULL constant.
226 : */
227 616 : if (!IsA(tle->expr, Const) ||
228 616 : !((Const *) tle->expr)->constisnull)
229 0 : ereport(ERROR,
230 : (errcode(ERRCODE_DATATYPE_MISMATCH),
231 : errmsg("table row type and query-specified row type do not match"),
232 : errdetail("Query provides a value for a dropped column at ordinal position %d.",
233 : attno)));
234 : }
235 : }
236 91796 : if (attno != resultDesc->natts)
237 0 : ereport(ERROR,
238 : (errcode(ERRCODE_DATATYPE_MISMATCH),
239 : errmsg("table row type and query-specified row type do not match"),
240 : errdetail("Query has too few columns.")));
241 91796 : }
242 :
243 : /*
244 : * ExecProcessReturning --- evaluate a RETURNING list
245 : *
246 : * resultRelInfo: current result rel
247 : * tupleSlot: slot holding tuple actually inserted/updated/deleted
248 : * planSlot: slot holding tuple returned by top subplan node
249 : *
250 : * Note: If tupleSlot is NULL, the FDW should have already provided econtext's
251 : * scan tuple.
252 : *
253 : * Returns a slot holding the result tuple
254 : */
255 : static TupleTableSlot *
256 7476 : ExecProcessReturning(ResultRelInfo *resultRelInfo,
257 : TupleTableSlot *tupleSlot,
258 : TupleTableSlot *planSlot)
259 : {
260 7476 : ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
261 7476 : ExprContext *econtext = projectReturning->pi_exprContext;
262 :
263 : /* Make tuple and any needed join variables available to ExecProject */
264 7476 : if (tupleSlot)
265 6782 : econtext->ecxt_scantuple = tupleSlot;
266 7476 : econtext->ecxt_outertuple = planSlot;
267 :
268 : /*
269 : * RETURNING expressions might reference the tableoid column, so
270 : * reinitialize tts_tableOid before evaluating them.
271 : */
272 7476 : econtext->ecxt_scantuple->tts_tableOid =
273 7476 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
274 :
275 : /* Compute the RETURNING expressions */
276 7476 : return ExecProject(projectReturning);
277 : }
278 :
279 : /*
280 : * ExecCheckTupleVisible -- verify tuple is visible
281 : *
282 : * It would not be consistent with guarantees of the higher isolation levels to
283 : * proceed with avoiding insertion (taking speculative insertion's alternative
284 : * path) on the basis of another tuple that is not visible to MVCC snapshot.
285 : * Check for the need to raise a serialization failure, and do so as necessary.
286 : */
287 : static void
288 5240 : ExecCheckTupleVisible(EState *estate,
289 : Relation rel,
290 : TupleTableSlot *slot)
291 : {
292 5240 : if (!IsolationUsesXactSnapshot())
293 5176 : return;
294 :
295 64 : if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
296 : {
297 : Datum xminDatum;
298 : TransactionId xmin;
299 : bool isnull;
300 :
301 40 : xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
302 : Assert(!isnull);
303 40 : xmin = DatumGetTransactionId(xminDatum);
304 :
305 : /*
306 : * We should not raise a serialization failure if the conflict is
307 : * against a tuple inserted by our own transaction, even if it's not
308 : * visible to our snapshot. (This would happen, for example, if
309 : * conflicting keys are proposed for insertion in a single command.)
310 : */
311 40 : if (!TransactionIdIsCurrentTransactionId(xmin))
312 20 : ereport(ERROR,
313 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
314 : errmsg("could not serialize access due to concurrent update")));
315 : }
316 : }
317 :
318 : /*
319 : * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
320 : */
321 : static void
322 212 : ExecCheckTIDVisible(EState *estate,
323 : ResultRelInfo *relinfo,
324 : ItemPointer tid,
325 : TupleTableSlot *tempSlot)
326 : {
327 212 : Relation rel = relinfo->ri_RelationDesc;
328 :
329 : /* Redundantly check isolation level */
330 212 : if (!IsolationUsesXactSnapshot())
331 148 : return;
332 :
333 64 : if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
334 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
335 64 : ExecCheckTupleVisible(estate, rel, tempSlot);
336 44 : ExecClearTuple(tempSlot);
337 : }
338 :
339 : /*
340 : * Initialize to compute stored generated columns for a tuple
341 : *
342 : * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI
343 : * or ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
344 : * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
345 : *
346 : * Note: usually, a given query would need only one of ri_GeneratedExprsI and
347 : * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
348 : * cross-partition UPDATEs, since a partition might be the target of both
349 : * UPDATE and INSERT actions.
350 : */
351 : void
352 58746 : ExecInitStoredGenerated(ResultRelInfo *resultRelInfo,
353 : EState *estate,
354 : CmdType cmdtype)
355 : {
356 58746 : Relation rel = resultRelInfo->ri_RelationDesc;
357 58746 : TupleDesc tupdesc = RelationGetDescr(rel);
358 58746 : int natts = tupdesc->natts;
359 : ExprState **ri_GeneratedExprs;
360 : int ri_NumGeneratedNeeded;
361 : Bitmapset *updatedCols;
362 : MemoryContext oldContext;
363 :
364 : /* Nothing to do if no generated columns */
365 58746 : if (!(tupdesc->constr && tupdesc->constr->has_generated_stored))
366 57792 : return;
367 :
368 : /*
369 : * In an UPDATE, we can skip computing any generated columns that do not
370 : * depend on any UPDATE target column. But if there is a BEFORE ROW
371 : * UPDATE trigger, we cannot skip because the trigger might change more
372 : * columns.
373 : */
374 954 : if (cmdtype == CMD_UPDATE &&
375 230 : !(rel->trigdesc && rel->trigdesc->trig_update_before_row))
376 204 : updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
377 : else
378 750 : updatedCols = NULL;
379 :
380 : /*
381 : * Make sure these data structures are built in the per-query memory
382 : * context so they'll survive throughout the query.
383 : */
384 954 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
385 :
386 954 : ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
387 954 : ri_NumGeneratedNeeded = 0;
388 :
389 3682 : for (int i = 0; i < natts; i++)
390 : {
391 2728 : if (TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED)
392 : {
393 : Expr *expr;
394 :
395 : /* Fetch the GENERATED AS expression tree */
396 978 : expr = (Expr *) build_column_default(rel, i + 1);
397 978 : if (expr == NULL)
398 0 : elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
399 : i + 1, RelationGetRelationName(rel));
400 :
401 : /*
402 : * If it's an update with a known set of update target columns,
403 : * see if we can skip the computation.
404 : */
405 978 : if (updatedCols)
406 : {
407 210 : Bitmapset *attrs_used = NULL;
408 :
409 210 : pull_varattnos((Node *) expr, 1, &attrs_used);
410 :
411 210 : if (!bms_overlap(updatedCols, attrs_used))
412 24 : continue; /* need not update this column */
413 : }
414 :
415 : /* No luck, so prepare the expression for execution */
416 954 : ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
417 954 : ri_NumGeneratedNeeded++;
418 :
419 : /* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
420 954 : if (cmdtype == CMD_UPDATE)
421 212 : resultRelInfo->ri_extraUpdatedCols =
422 212 : bms_add_member(resultRelInfo->ri_extraUpdatedCols,
423 : i + 1 - FirstLowInvalidHeapAttributeNumber);
424 : }
425 : }
426 :
427 : /* Save in appropriate set of fields */
428 954 : if (cmdtype == CMD_UPDATE)
429 : {
430 : /* Don't call twice */
431 : Assert(resultRelInfo->ri_GeneratedExprsU == NULL);
432 :
433 230 : resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
434 230 : resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;
435 : }
436 : else
437 : {
438 : /* Don't call twice */
439 : Assert(resultRelInfo->ri_GeneratedExprsI == NULL);
440 :
441 724 : resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
442 724 : resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
443 : }
444 :
445 954 : MemoryContextSwitchTo(oldContext);
446 : }
447 :
448 : /*
449 : * Compute stored generated columns for a tuple
450 : */
451 : void
452 1266 : ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
453 : EState *estate, TupleTableSlot *slot,
454 : CmdType cmdtype)
455 : {
456 1266 : Relation rel = resultRelInfo->ri_RelationDesc;
457 1266 : TupleDesc tupdesc = RelationGetDescr(rel);
458 1266 : int natts = tupdesc->natts;
459 1266 : ExprContext *econtext = GetPerTupleExprContext(estate);
460 : ExprState **ri_GeneratedExprs;
461 : MemoryContext oldContext;
462 : Datum *values;
463 : bool *nulls;
464 :
465 : /* We should not be called unless this is true */
466 : Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);
467 :
468 : /*
469 : * Initialize the expressions if we didn't already, and check whether we
470 : * can exit early because nothing needs to be computed.
471 : */
472 1266 : if (cmdtype == CMD_UPDATE)
473 : {
474 266 : if (resultRelInfo->ri_GeneratedExprsU == NULL)
475 204 : ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
476 266 : if (resultRelInfo->ri_NumGeneratedNeededU == 0)
477 18 : return;
478 248 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
479 : }
480 : else
481 : {
482 1000 : if (resultRelInfo->ri_GeneratedExprsI == NULL)
483 724 : ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
484 : /* Early exit is impossible given the prior Assert */
485 : Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
486 1000 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
487 : }
488 :
489 1248 : oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
490 :
491 1248 : values = palloc(sizeof(*values) * natts);
492 1248 : nulls = palloc(sizeof(*nulls) * natts);
493 :
494 1248 : slot_getallattrs(slot);
495 1248 : memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);
496 :
497 4692 : for (int i = 0; i < natts; i++)
498 : {
499 3456 : Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
500 :
501 3456 : if (ri_GeneratedExprs[i])
502 : {
503 : Datum val;
504 : bool isnull;
505 :
506 : Assert(attr->attgenerated == ATTRIBUTE_GENERATED_STORED);
507 :
508 1270 : econtext->ecxt_scantuple = slot;
509 :
510 1270 : val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);
511 :
512 : /*
513 : * We must make a copy of val as we have no guarantees about where
514 : * memory for a pass-by-reference Datum is located.
515 : */
516 1258 : if (!isnull)
517 1216 : val = datumCopy(val, attr->attbyval, attr->attlen);
518 :
519 1258 : values[i] = val;
520 1258 : nulls[i] = isnull;
521 : }
522 : else
523 : {
524 2186 : if (!nulls[i])
525 2144 : values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
526 : }
527 : }
528 :
529 1236 : ExecClearTuple(slot);
530 1236 : memcpy(slot->tts_values, values, sizeof(*values) * natts);
531 1236 : memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
532 1236 : ExecStoreVirtualTuple(slot);
533 1236 : ExecMaterializeSlot(slot);
534 :
535 1236 : MemoryContextSwitchTo(oldContext);
536 : }
537 :
538 : /*
539 : * ExecInitInsertProjection
540 : * Do one-time initialization of projection data for INSERT tuples.
541 : *
542 : * INSERT queries may need a projection to filter out junk attrs in the tlist.
543 : *
544 : * This is also a convenient place to verify that the
545 : * output of an INSERT matches the target table.
546 : */
547 : static void
548 90816 : ExecInitInsertProjection(ModifyTableState *mtstate,
549 : ResultRelInfo *resultRelInfo)
550 : {
551 90816 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
552 90816 : Plan *subplan = outerPlan(node);
553 90816 : EState *estate = mtstate->ps.state;
554 90816 : List *insertTargetList = NIL;
555 90816 : bool need_projection = false;
556 : ListCell *l;
557 :
558 : /* Extract non-junk columns of the subplan's result tlist. */
559 276484 : foreach(l, subplan->targetlist)
560 : {
561 185668 : TargetEntry *tle = (TargetEntry *) lfirst(l);
562 :
563 185668 : if (!tle->resjunk)
564 185668 : insertTargetList = lappend(insertTargetList, tle);
565 : else
566 0 : need_projection = true;
567 : }
568 :
569 : /*
570 : * The junk-free list must produce a tuple suitable for the result
571 : * relation.
572 : */
573 90816 : ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);
574 :
575 : /* We'll need a slot matching the table's format. */
576 90816 : resultRelInfo->ri_newTupleSlot =
577 90816 : table_slot_create(resultRelInfo->ri_RelationDesc,
578 : &estate->es_tupleTable);
579 :
580 : /* Build ProjectionInfo if needed (it probably isn't). */
581 90816 : if (need_projection)
582 : {
583 0 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
584 :
585 : /* need an expression context to do the projection */
586 0 : if (mtstate->ps.ps_ExprContext == NULL)
587 0 : ExecAssignExprContext(estate, &mtstate->ps);
588 :
589 0 : resultRelInfo->ri_projectNew =
590 0 : ExecBuildProjectionInfo(insertTargetList,
591 : mtstate->ps.ps_ExprContext,
592 : resultRelInfo->ri_newTupleSlot,
593 : &mtstate->ps,
594 : relDesc);
595 : }
596 :
597 90816 : resultRelInfo->ri_projectNewInfoValid = true;
598 90816 : }
599 :
600 : /*
601 : * ExecInitUpdateProjection
602 : * Do one-time initialization of projection data for UPDATE tuples.
603 : *
604 : * UPDATE always needs a projection, because (1) there's always some junk
605 : * attrs, and (2) we may need to merge values of not-updated columns from
606 : * the old tuple into the final tuple. In UPDATE, the tuple arriving from
607 : * the subplan contains only new values for the changed columns, plus row
608 : * identity info in the junk attrs.
609 : *
610 : * This is "one-time" for any given result rel, but we might touch more than
611 : * one result rel in the course of an inherited UPDATE, and each one needs
612 : * its own projection due to possible column order variation.
613 : *
614 : * This is also a convenient place to verify that the output of an UPDATE
615 : * matches the target table (ExecBuildUpdateProjection does that).
616 : */
617 : static void
618 12472 : ExecInitUpdateProjection(ModifyTableState *mtstate,
619 : ResultRelInfo *resultRelInfo)
620 : {
621 12472 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
622 12472 : Plan *subplan = outerPlan(node);
623 12472 : EState *estate = mtstate->ps.state;
624 12472 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
625 : int whichrel;
626 : List *updateColnos;
627 :
628 : /*
629 : * Usually, mt_lastResultIndex matches the target rel. If it happens not
630 : * to, we can get the index the hard way with an integer division.
631 : */
632 12472 : whichrel = mtstate->mt_lastResultIndex;
633 12472 : if (resultRelInfo != mtstate->resultRelInfo + whichrel)
634 : {
635 0 : whichrel = resultRelInfo - mtstate->resultRelInfo;
636 : Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
637 : }
638 :
639 12472 : updateColnos = (List *) list_nth(node->updateColnosLists, whichrel);
640 :
641 : /*
642 : * For UPDATE, we use the old tuple to fill up missing values in the tuple
643 : * produced by the subplan to get the new tuple. We need two slots, both
644 : * matching the table's desired format.
645 : */
646 12472 : resultRelInfo->ri_oldTupleSlot =
647 12472 : table_slot_create(resultRelInfo->ri_RelationDesc,
648 : &estate->es_tupleTable);
649 12472 : resultRelInfo->ri_newTupleSlot =
650 12472 : table_slot_create(resultRelInfo->ri_RelationDesc,
651 : &estate->es_tupleTable);
652 :
653 : /* need an expression context to do the projection */
654 12472 : if (mtstate->ps.ps_ExprContext == NULL)
655 11278 : ExecAssignExprContext(estate, &mtstate->ps);
656 :
657 12472 : resultRelInfo->ri_projectNew =
658 12472 : ExecBuildUpdateProjection(subplan->targetlist,
659 : false, /* subplan did the evaluation */
660 : updateColnos,
661 : relDesc,
662 : mtstate->ps.ps_ExprContext,
663 : resultRelInfo->ri_newTupleSlot,
664 : &mtstate->ps);
665 :
666 12472 : resultRelInfo->ri_projectNewInfoValid = true;
667 12472 : }
668 :
669 : /*
670 : * ExecGetInsertNewTuple
671 : * This prepares a "new" tuple ready to be inserted into given result
672 : * relation, by removing any junk columns of the plan's output tuple
673 : * and (if necessary) coercing the tuple to the right tuple format.
674 : */
675 : static TupleTableSlot *
676 11396030 : ExecGetInsertNewTuple(ResultRelInfo *relinfo,
677 : TupleTableSlot *planSlot)
678 : {
679 11396030 : ProjectionInfo *newProj = relinfo->ri_projectNew;
680 : ExprContext *econtext;
681 :
682 : /*
683 : * If there's no projection to be done, just make sure the slot is of the
684 : * right type for the target rel. If the planSlot is the right type we
685 : * can use it as-is, else copy the data into ri_newTupleSlot.
686 : */
687 11396030 : if (newProj == NULL)
688 : {
689 11396030 : if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
690 : {
691 10625044 : ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
692 10625044 : return relinfo->ri_newTupleSlot;
693 : }
694 : else
695 770986 : return planSlot;
696 : }
697 :
698 : /*
699 : * Else project; since the projection output slot is ri_newTupleSlot, this
700 : * will also fix any slot-type problem.
701 : *
702 : * Note: currently, this is dead code, because INSERT cases don't receive
703 : * any junk columns so there's never a projection to be done.
704 : */
705 0 : econtext = newProj->pi_exprContext;
706 0 : econtext->ecxt_outertuple = planSlot;
707 0 : return ExecProject(newProj);
708 : }
709 :
710 : /*
711 : * ExecGetUpdateNewTuple
712 : * This prepares a "new" tuple by combining an UPDATE subplan's output
713 : * tuple (which contains values of changed columns) with unchanged
714 : * columns taken from the old tuple.
715 : *
716 : * The subplan tuple might also contain junk columns, which are ignored.
717 : * Note that the projection also ensures we have a slot of the right type.
718 : */
719 : TupleTableSlot *
720 308974 : ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
721 : TupleTableSlot *planSlot,
722 : TupleTableSlot *oldSlot)
723 : {
724 308974 : ProjectionInfo *newProj = relinfo->ri_projectNew;
725 : ExprContext *econtext;
726 :
727 : /* Use a few extra Asserts to protect against outside callers */
728 : Assert(relinfo->ri_projectNewInfoValid);
729 : Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
730 : Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));
731 :
732 308974 : econtext = newProj->pi_exprContext;
733 308974 : econtext->ecxt_outertuple = planSlot;
734 308974 : econtext->ecxt_scantuple = oldSlot;
735 308974 : return ExecProject(newProj);
736 : }
737 :
738 : /* ----------------------------------------------------------------
739 : * ExecInsert
740 : *
741 : * For INSERT, we have to insert the tuple into the target relation
742 : * (or partition thereof) and insert appropriate tuples into the index
743 : * relations.
744 : *
745 : * slot contains the new tuple value to be stored.
746 : *
747 : * Returns RETURNING result if any, otherwise NULL.
748 : * *inserted_tuple is the tuple that's effectively inserted;
749 : * *insert_destrel is the relation where it was inserted.
750 : * These are only set on success.
751 : *
752 : * This may change the currently active tuple conversion map in
753 : * mtstate->mt_transition_capture, so the callers must take care to
754 : * save the previous value to avoid losing track of it.
755 : * ----------------------------------------------------------------
756 : */
757 : static TupleTableSlot *
758 11398716 : ExecInsert(ModifyTableContext *context,
759 : ResultRelInfo *resultRelInfo,
760 : TupleTableSlot *slot,
761 : bool canSetTag,
762 : TupleTableSlot **inserted_tuple,
763 : ResultRelInfo **insert_destrel)
764 : {
765 11398716 : ModifyTableState *mtstate = context->mtstate;
766 11398716 : EState *estate = context->estate;
767 : Relation resultRelationDesc;
768 11398716 : List *recheckIndexes = NIL;
769 11398716 : TupleTableSlot *planSlot = context->planSlot;
770 11398716 : TupleTableSlot *result = NULL;
771 : TransitionCaptureState *ar_insert_trig_tcs;
772 11398716 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
773 11398716 : OnConflictAction onconflict = node->onConflictAction;
774 11398716 : PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
775 : MemoryContext oldContext;
776 :
777 : /*
778 : * If the input result relation is a partitioned table, find the leaf
779 : * partition to insert the tuple into.
780 : */
781 11398716 : if (proute)
782 : {
783 : ResultRelInfo *partRelInfo;
784 :
785 722156 : slot = ExecPrepareTupleRouting(mtstate, estate, proute,
786 : resultRelInfo, slot,
787 : &partRelInfo);
788 721952 : resultRelInfo = partRelInfo;
789 : }
790 :
791 11398512 : ExecMaterializeSlot(slot);
792 :
793 11398512 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
794 :
795 : /*
796 : * Open the table's indexes, if we have not done so already, so that we
797 : * can add new index entries for the inserted tuple.
798 : */
799 11398512 : if (resultRelationDesc->rd_rel->relhasindex &&
800 3009668 : resultRelInfo->ri_IndexRelationDescs == NULL)
801 31698 : ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);
802 :
803 : /*
804 : * BEFORE ROW INSERT Triggers.
805 : *
806 : * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
807 : * INSERT ... ON CONFLICT statement. We cannot check for constraint
808 : * violations before firing these triggers, because they can change the
809 : * values to insert. Also, they can run arbitrary user-defined code with
810 : * side-effects that we can't cancel by just not inserting the tuple.
811 : */
812 11398512 : if (resultRelInfo->ri_TrigDesc &&
813 75274 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)
814 : {
815 : /* Flush any pending inserts, so rows are visible to the triggers */
816 2120 : if (estate->es_insert_pending_result_relations != NIL)
817 6 : ExecPendingInserts(estate);
818 :
819 2120 : if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
820 200 : return NULL; /* "do nothing" */
821 : }
822 :
823 : /* INSTEAD OF ROW INSERT Triggers */
824 11398196 : if (resultRelInfo->ri_TrigDesc &&
825 74958 : resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
826 : {
827 168 : if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
828 6 : return NULL; /* "do nothing" */
829 : }
830 11398028 : else if (resultRelInfo->ri_FdwRoutine)
831 : {
832 : /*
833 : * GENERATED expressions might reference the tableoid column, so
834 : * (re-)initialize tts_tableOid before evaluating them.
835 : */
836 2014 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
837 :
838 : /*
839 : * Compute stored generated columns
840 : */
841 2014 : if (resultRelationDesc->rd_att->constr &&
842 366 : resultRelationDesc->rd_att->constr->has_generated_stored)
843 8 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
844 : CMD_INSERT);
845 :
846 : /*
847 : * If the FDW supports batching, and batching is requested, accumulate
848 : * rows and insert them in batches. Otherwise use the per-row inserts.
849 : */
850 2014 : if (resultRelInfo->ri_BatchSize > 1)
851 : {
852 288 : bool flushed = false;
853 :
854 : /*
855 : * When we've reached the desired batch size, perform the
856 : * insertion.
857 : */
858 288 : if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
859 : {
860 20 : ExecBatchInsert(mtstate, resultRelInfo,
861 : resultRelInfo->ri_Slots,
862 : resultRelInfo->ri_PlanSlots,
863 : resultRelInfo->ri_NumSlots,
864 : estate, canSetTag);
865 20 : flushed = true;
866 : }
867 :
868 288 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
869 :
870 288 : if (resultRelInfo->ri_Slots == NULL)
871 : {
872 56 : resultRelInfo->ri_Slots = palloc(sizeof(TupleTableSlot *) *
873 28 : resultRelInfo->ri_BatchSize);
874 28 : resultRelInfo->ri_PlanSlots = palloc(sizeof(TupleTableSlot *) *
875 28 : resultRelInfo->ri_BatchSize);
876 : }
877 :
878 : /*
879 : * Initialize the batch slots. We don't know how many slots will
880 : * be needed, so we initialize them as the batch grows, and we
881 : * keep them across batches. To mitigate an inefficiency in how
882 : * resource owner handles objects with many references (as with
883 : * many slots all referencing the same tuple descriptor) we copy
884 : * the appropriate tuple descriptor for each slot.
885 : */
886 288 : if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
887 : {
888 142 : TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
889 : TupleDesc plan_tdesc =
890 142 : CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
891 :
892 284 : resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
893 142 : MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
894 :
895 284 : resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
896 142 : MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
897 :
898 : /* remember how many batch slots we initialized */
899 142 : resultRelInfo->ri_NumSlotsInitialized++;
900 : }
901 :
902 288 : ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
903 : slot);
904 :
905 288 : ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
906 : planSlot);
907 :
908 : /*
909 : * If these are the first tuples stored in the buffers, add the
910 : * target rel and the mtstate to the
911 : * es_insert_pending_result_relations and
912 : * es_insert_pending_modifytables lists respectively, except in
913 : * the case where flushing was done above, in which case they
914 : * would already have been added to the lists, so no need to do
915 : * this.
916 : */
917 288 : if (resultRelInfo->ri_NumSlots == 0 && !flushed)
918 : {
919 : Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
920 : resultRelInfo));
921 36 : estate->es_insert_pending_result_relations =
922 36 : lappend(estate->es_insert_pending_result_relations,
923 : resultRelInfo);
924 36 : estate->es_insert_pending_modifytables =
925 36 : lappend(estate->es_insert_pending_modifytables, mtstate);
926 : }
927 : Assert(list_member_ptr(estate->es_insert_pending_result_relations,
928 : resultRelInfo));
929 :
930 288 : resultRelInfo->ri_NumSlots++;
931 :
932 288 : MemoryContextSwitchTo(oldContext);
933 :
934 288 : return NULL;
935 : }
936 :
937 : /*
938 : * insert into foreign table: let the FDW do it
939 : */
940 1726 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
941 : resultRelInfo,
942 : slot,
943 : planSlot);
944 :
945 1720 : if (slot == NULL) /* "do nothing" */
946 4 : return NULL;
947 :
948 : /*
949 : * AFTER ROW Triggers or RETURNING expressions might reference the
950 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
951 : * them. (This covers the case where the FDW replaced the slot.)
952 : */
953 1716 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
954 : }
955 : else
956 : {
957 : WCOKind wco_kind;
958 :
959 : /*
960 : * Constraints and GENERATED expressions might reference the tableoid
961 : * column, so (re-)initialize tts_tableOid before evaluating them.
962 : */
963 11396014 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
964 :
965 : /*
966 : * Compute stored generated columns
967 : */
968 11396014 : if (resultRelationDesc->rd_att->constr &&
969 3227028 : resultRelationDesc->rd_att->constr->has_generated_stored)
970 948 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
971 : CMD_INSERT);
972 :
973 : /*
974 : * Check any RLS WITH CHECK policies.
975 : *
976 : * Normally we should check INSERT policies. But if the insert is the
977 : * result of a partition key update that moved the tuple to a new
978 : * partition, we should instead check UPDATE policies, because we are
979 : * executing policies defined on the target table, and not those
980 : * defined on the child partitions.
981 : *
982 : * If we're running MERGE, we refer to the action that we're executing
983 : * to know if we're doing an INSERT or UPDATE to a partition table.
984 : */
985 11396002 : if (mtstate->operation == CMD_UPDATE)
986 734 : wco_kind = WCO_RLS_UPDATE_CHECK;
987 11395268 : else if (mtstate->operation == CMD_MERGE)
988 1712 : wco_kind = (mtstate->mt_merge_action->mas_action->commandType == CMD_UPDATE) ?
989 1712 : WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
990 : else
991 11393556 : wco_kind = WCO_RLS_INSERT_CHECK;
992 :
993 : /*
994 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
995 : * we are looking for at this point.
996 : */
997 11396002 : if (resultRelInfo->ri_WithCheckOptions != NIL)
998 552 : ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
999 :
1000 : /*
1001 : * Check the constraints of the tuple.
1002 : */
1003 11395828 : if (resultRelationDesc->rd_att->constr)
1004 3226932 : ExecConstraints(resultRelInfo, slot, estate);
1005 :
1006 : /*
1007 : * Also check the tuple against the partition constraint, if there is
1008 : * one; except that if we got here via tuple-routing, we don't need to
1009 : * if there's no BR trigger defined on the partition.
1010 : */
1011 11395194 : if (resultRelationDesc->rd_rel->relispartition &&
1012 726192 : (resultRelInfo->ri_RootResultRelInfo == NULL ||
1013 721374 : (resultRelInfo->ri_TrigDesc &&
1014 1598 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
1015 5014 : ExecPartitionCheck(resultRelInfo, slot, estate, true);
1016 :
1017 11395026 : if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
1018 4106 : {
1019 : /* Perform a speculative insertion. */
1020 : uint32 specToken;
1021 : ItemPointerData conflictTid;
1022 : ItemPointerData invalidItemPtr;
1023 : bool specConflict;
1024 : List *arbiterIndexes;
1025 :
1026 9530 : ItemPointerSetInvalid(&invalidItemPtr);
1027 9530 : arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
1028 :
1029 : /*
1030 : * Do a non-conclusive check for conflicts first.
1031 : *
1032 : * We're not holding any locks yet, so this doesn't guarantee that
1033 : * the later insert won't conflict. But it avoids leaving behind
1034 : * a lot of canceled speculative insertions, if you run a lot of
1035 : * INSERT ON CONFLICT statements that do conflict.
1036 : *
1037 : * We loop back here if we find a conflict below, either during
1038 : * the pre-check, or when we re-check after inserting the tuple
1039 : * speculatively. Better allow interrupts in case some bug makes
1040 : * this an infinite loop.
1041 : */
1042 9540 : vlock:
1043 9540 : CHECK_FOR_INTERRUPTS();
1044 9540 : specConflict = false;
1045 9540 : if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
1046 : &conflictTid, &invalidItemPtr,
1047 : arbiterIndexes))
1048 : {
1049 : /* committed conflict tuple found */
1050 5412 : if (onconflict == ONCONFLICT_UPDATE)
1051 : {
1052 : /*
1053 : * In case of ON CONFLICT DO UPDATE, execute the UPDATE
1054 : * part. Be prepared to retry if the UPDATE fails because
1055 : * of another concurrent UPDATE/DELETE to the conflict
1056 : * tuple.
1057 : */
1058 5200 : TupleTableSlot *returning = NULL;
1059 :
1060 5200 : if (ExecOnConflictUpdate(context, resultRelInfo,
1061 : &conflictTid, slot, canSetTag,
1062 : &returning))
1063 : {
1064 5122 : InstrCountTuples2(&mtstate->ps, 1);
1065 5122 : return returning;
1066 : }
1067 : else
1068 0 : goto vlock;
1069 : }
1070 : else
1071 : {
1072 : /*
1073 : * In case of ON CONFLICT DO NOTHING, do nothing. However,
1074 : * verify that the tuple is visible to the executor's MVCC
1075 : * snapshot at higher isolation levels.
1076 : *
1077 : * Using ExecGetReturningSlot() to store the tuple for the
1078 : * recheck isn't that pretty, but we can't trivially use
1079 : * the input slot, because it might not be of a compatible
1080 : * type. As there's no conflicting usage of
1081 : * ExecGetReturningSlot() in the DO NOTHING case...
1082 : */
1083 : Assert(onconflict == ONCONFLICT_NOTHING);
1084 212 : ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
1085 : ExecGetReturningSlot(estate, resultRelInfo));
1086 192 : InstrCountTuples2(&mtstate->ps, 1);
1087 192 : return NULL;
1088 : }
1089 : }
1090 :
1091 : /*
1092 : * Before we start insertion proper, acquire our "speculative
1093 : * insertion lock". Others can use that to wait for us to decide
1094 : * if we're going to go ahead with the insertion, instead of
1095 : * waiting for the whole transaction to complete.
1096 : */
1097 4122 : specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
1098 :
1099 : /* insert the tuple, with the speculative token */
1100 4122 : table_tuple_insert_speculative(resultRelationDesc, slot,
1101 : estate->es_output_cid,
1102 : 0,
1103 : NULL,
1104 : specToken);
1105 :
1106 : /* insert index entries for tuple */
1107 4122 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1108 : slot, estate, false, true,
1109 : &specConflict,
1110 : arbiterIndexes,
1111 : false);
1112 :
1113 : /* adjust the tuple's state accordingly */
1114 4116 : table_tuple_complete_speculative(resultRelationDesc, slot,
1115 4116 : specToken, !specConflict);
1116 :
1117 : /*
1118 : * Wake up anyone waiting for our decision. They will re-check
1119 : * the tuple, see that it's no longer speculative, and wait on our
1120 : * XID as if this was a regularly inserted tuple all along. Or if
1121 : * we killed the tuple, they will see it's dead, and proceed as if
1122 : * the tuple never existed.
1123 : */
1124 4116 : SpeculativeInsertionLockRelease(GetCurrentTransactionId());
1125 :
1126 : /*
1127 : * If there was a conflict, start from the beginning. We'll do
1128 : * the pre-check again, which will now find the conflicting tuple
1129 : * (unless it aborts before we get there).
1130 : */
1131 4116 : if (specConflict)
1132 : {
1133 10 : list_free(recheckIndexes);
1134 10 : goto vlock;
1135 : }
1136 :
1137 : /* Since there was no insertion conflict, we're done */
1138 : }
1139 : else
1140 : {
1141 : /* insert the tuple normally */
1142 11385496 : table_tuple_insert(resultRelationDesc, slot,
1143 : estate->es_output_cid,
1144 : 0, NULL);
1145 :
1146 : /* insert index entries for tuple */
1147 11385456 : if (resultRelInfo->ri_NumIndices > 0)
1148 2999268 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1149 : slot, estate, false,
1150 : false, NULL, NIL,
1151 : false);
1152 : }
1153 : }
1154 :
1155 11390862 : if (canSetTag)
1156 11389690 : (estate->es_processed)++;
1157 :
1158 : /*
1159 : * If this insert is the result of a partition key update that moved the
1160 : * tuple to a new partition, put this row into the transition NEW TABLE,
1161 : * if there is one. We need to do this separately for DELETE and INSERT
1162 : * because they happen on different tables.
1163 : */
1164 11390862 : ar_insert_trig_tcs = mtstate->mt_transition_capture;
1165 11390862 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
1166 42 : && mtstate->mt_transition_capture->tcs_update_new_table)
1167 : {
1168 42 : ExecARUpdateTriggers(estate, resultRelInfo,
1169 : NULL, NULL,
1170 : NULL,
1171 : NULL,
1172 : slot,
1173 : NULL,
1174 42 : mtstate->mt_transition_capture,
1175 : false);
1176 :
1177 : /*
1178 : * We've already captured the NEW TABLE row, so make sure any AR
1179 : * INSERT trigger fired below doesn't capture it again.
1180 : */
1181 42 : ar_insert_trig_tcs = NULL;
1182 : }
1183 :
1184 : /* AFTER ROW INSERT Triggers */
1185 11390862 : ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
1186 : ar_insert_trig_tcs);
1187 :
1188 11390862 : list_free(recheckIndexes);
1189 :
1190 : /*
1191 : * Check any WITH CHECK OPTION constraints from parent views. We are
1192 : * required to do this after testing all constraints and uniqueness
1193 : * violations per the SQL spec, so we do it after actually inserting the
1194 : * record into the heap and all indexes.
1195 : *
1196 : * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
1197 : * tuple will never be seen, if it violates the WITH CHECK OPTION.
1198 : *
1199 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1200 : * are looking for at this point.
1201 : */
1202 11390862 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1203 364 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1204 :
1205 : /* Process RETURNING if present */
1206 11390716 : if (resultRelInfo->ri_projectReturning)
1207 3472 : result = ExecProcessReturning(resultRelInfo, slot, planSlot);
1208 :
1209 11390704 : if (inserted_tuple)
1210 760 : *inserted_tuple = slot;
1211 11390704 : if (insert_destrel)
1212 760 : *insert_destrel = resultRelInfo;
1213 :
1214 11390704 : return result;
1215 : }
1216 :
1217 : /* ----------------------------------------------------------------
1218 : * ExecBatchInsert
1219 : *
1220 : * Insert multiple tuples in an efficient way.
1221 : * Currently, this handles inserting into a foreign table without
1222 : * RETURNING clause.
1223 : * ----------------------------------------------------------------
1224 : */
1225 : static void
1226 56 : ExecBatchInsert(ModifyTableState *mtstate,
1227 : ResultRelInfo *resultRelInfo,
1228 : TupleTableSlot **slots,
1229 : TupleTableSlot **planSlots,
1230 : int numSlots,
1231 : EState *estate,
1232 : bool canSetTag)
1233 : {
1234 : int i;
1235 56 : int numInserted = numSlots;
1236 56 : TupleTableSlot *slot = NULL;
1237 : TupleTableSlot **rslots;
1238 :
1239 : /*
1240 : * insert into foreign table: let the FDW do it
1241 : */
1242 56 : rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1243 : resultRelInfo,
1244 : slots,
1245 : planSlots,
1246 : &numInserted);
1247 :
1248 344 : for (i = 0; i < numInserted; i++)
1249 : {
1250 288 : slot = rslots[i];
1251 :
1252 : /*
1253 : * AFTER ROW Triggers might reference the tableoid column, so
1254 : * (re-)initialize tts_tableOid before evaluating them.
1255 : */
1256 288 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1257 :
1258 : /* AFTER ROW INSERT Triggers */
1259 288 : ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1260 288 : mtstate->mt_transition_capture);
1261 :
1262 : /*
1263 : * Check any WITH CHECK OPTION constraints from parent views. See the
1264 : * comment in ExecInsert.
1265 : */
1266 288 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1267 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1268 : }
1269 :
1270 56 : if (canSetTag && numInserted > 0)
1271 56 : estate->es_processed += numInserted;
1272 :
1273 : /* Clean up all the slots, ready for the next batch */
1274 344 : for (i = 0; i < numSlots; i++)
1275 : {
1276 288 : ExecClearTuple(slots[i]);
1277 288 : ExecClearTuple(planSlots[i]);
1278 : }
1279 56 : resultRelInfo->ri_NumSlots = 0;
1280 56 : }
1281 :
1282 : /*
1283 : * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1284 : */
1285 : static void
1286 34 : ExecPendingInserts(EState *estate)
1287 : {
1288 : ListCell *l1,
1289 : *l2;
1290 :
1291 70 : forboth(l1, estate->es_insert_pending_result_relations,
1292 : l2, estate->es_insert_pending_modifytables)
1293 : {
1294 36 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1295 36 : ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1296 :
1297 : Assert(mtstate);
1298 36 : ExecBatchInsert(mtstate, resultRelInfo,
1299 : resultRelInfo->ri_Slots,
1300 : resultRelInfo->ri_PlanSlots,
1301 : resultRelInfo->ri_NumSlots,
1302 36 : estate, mtstate->canSetTag);
1303 : }
1304 :
1305 34 : list_free(estate->es_insert_pending_result_relations);
1306 34 : list_free(estate->es_insert_pending_modifytables);
1307 34 : estate->es_insert_pending_result_relations = NIL;
1308 34 : estate->es_insert_pending_modifytables = NIL;
1309 34 : }
1310 :
1311 : /*
1312 : * ExecDeletePrologue -- subroutine for ExecDelete
1313 : *
1314 : * Prepare executor state for DELETE. Actually, the only thing we have to do
1315 : * here is execute BEFORE ROW triggers. We return false if one of them makes
1316 : * the delete a no-op; otherwise, return true.
1317 : */
1318 : static bool
1319 1639850 : ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1320 : ItemPointer tupleid, HeapTuple oldtuple,
1321 : TupleTableSlot **epqreturnslot, TM_Result *result)
1322 : {
1323 1639850 : if (result)
1324 1382 : *result = TM_Ok;
1325 :
1326 : /* BEFORE ROW DELETE triggers */
1327 1639850 : if (resultRelInfo->ri_TrigDesc &&
1328 7126 : resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1329 : {
1330 : /* Flush any pending inserts, so rows are visible to the triggers */
1331 388 : if (context->estate->es_insert_pending_result_relations != NIL)
1332 2 : ExecPendingInserts(context->estate);
1333 :
1334 388 : return ExecBRDeleteTriggers(context->estate, context->epqstate,
1335 : resultRelInfo, tupleid, oldtuple,
1336 : epqreturnslot, result, &context->tmfd);
1337 : }
1338 :
1339 1639462 : return true;
1340 : }
1341 :
1342 : /*
1343 : * ExecDeleteAct -- subroutine for ExecDelete
1344 : *
1345 : * Actually delete the tuple from a plain table.
1346 : *
1347 : * Caller is in charge of doing EvalPlanQual as necessary
1348 : */
1349 : static TM_Result
1350 1639662 : ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1351 : ItemPointer tupleid, bool changingPart)
1352 : {
1353 1639662 : EState *estate = context->estate;
1354 :
1355 1639662 : return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1356 : estate->es_output_cid,
1357 : estate->es_snapshot,
1358 : estate->es_crosscheck_snapshot,
1359 : true /* wait for commit */ ,
1360 : &context->tmfd,
1361 : changingPart);
1362 : }
1363 :
1364 : /*
1365 : * ExecDeleteEpilogue -- subroutine for ExecDelete
1366 : *
1367 : * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1368 : * including the UPDATE triggers if the deletion is being done as part of a
1369 : * cross-partition tuple move.
1370 : */
1371 : static void
1372 1639602 : ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1373 : ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1374 : {
1375 1639602 : ModifyTableState *mtstate = context->mtstate;
1376 1639602 : EState *estate = context->estate;
1377 : TransitionCaptureState *ar_delete_trig_tcs;
1378 :
1379 : /*
1380 : * If this delete is the result of a partition key update that moved the
1381 : * tuple to a new partition, put this row into the transition OLD TABLE,
1382 : * if there is one. We need to do this separately for DELETE and INSERT
1383 : * because they happen on different tables.
1384 : */
1385 1639602 : ar_delete_trig_tcs = mtstate->mt_transition_capture;
1386 1639602 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1387 42 : mtstate->mt_transition_capture->tcs_update_old_table)
1388 : {
1389 42 : ExecARUpdateTriggers(estate, resultRelInfo,
1390 : NULL, NULL,
1391 : tupleid, oldtuple,
1392 42 : NULL, NULL, mtstate->mt_transition_capture,
1393 : false);
1394 :
1395 : /*
1396 : * We've already captured the OLD TABLE row, so make sure any AR
1397 : * DELETE trigger fired below doesn't capture it again.
1398 : */
1399 42 : ar_delete_trig_tcs = NULL;
1400 : }
1401 :
1402 : /* AFTER ROW DELETE Triggers */
1403 1639602 : ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1404 : ar_delete_trig_tcs, changingPart);
1405 1639602 : }
1406 :
1407 : /* ----------------------------------------------------------------
1408 : * ExecDelete
1409 : *
1410 : * DELETE is like UPDATE, except that we delete the tuple and no
1411 : * index modifications are needed.
1412 : *
1413 : * When deleting from a table, tupleid identifies the tuple to delete and
1414 : * oldtuple is NULL. When deleting through a view INSTEAD OF trigger,
1415 : * oldtuple is passed to the triggers and identifies what to delete, and
1416 : * tupleid is invalid. When deleting from a foreign table, tupleid is
1417 : * invalid; the FDW has to figure out which row to delete using data from
1418 : * the planSlot. oldtuple is passed to foreign table triggers; it is
1419 : * NULL when the foreign table has no relevant triggers. We use
1420 : * tupleDeleted to indicate whether the tuple is actually deleted,
1421 : * callers can use it to decide whether to continue the operation. When
1422 : * this DELETE is a part of an UPDATE of partition-key, then the slot
1423 : * returned by EvalPlanQual() is passed back using output parameter
1424 : * epqreturnslot.
1425 : *
1426 : * Returns RETURNING result if any, otherwise NULL.
1427 : * ----------------------------------------------------------------
1428 : */
1429 : static TupleTableSlot *
1430 1639462 : ExecDelete(ModifyTableContext *context,
1431 : ResultRelInfo *resultRelInfo,
1432 : ItemPointer tupleid,
1433 : HeapTuple oldtuple,
1434 : bool processReturning,
1435 : bool changingPart,
1436 : bool canSetTag,
1437 : TM_Result *tmresult,
1438 : bool *tupleDeleted,
1439 : TupleTableSlot **epqreturnslot)
1440 : {
1441 1639462 : EState *estate = context->estate;
1442 1639462 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1443 1639462 : TupleTableSlot *slot = NULL;
1444 : TM_Result result;
1445 :
1446 1639462 : if (tupleDeleted)
1447 994 : *tupleDeleted = false;
1448 :
1449 : /*
1450 : * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1451 : * done if it says we are.
1452 : */
1453 1639462 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1454 : epqreturnslot, tmresult))
1455 52 : return NULL;
1456 :
1457 : /* INSTEAD OF ROW DELETE Triggers */
1458 1639376 : if (resultRelInfo->ri_TrigDesc &&
1459 6984 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
1460 48 : {
1461 : bool dodelete;
1462 :
1463 : Assert(oldtuple != NULL);
1464 54 : dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1465 :
1466 54 : if (!dodelete) /* "do nothing" */
1467 6 : return NULL;
1468 : }
1469 1639322 : else if (resultRelInfo->ri_FdwRoutine)
1470 : {
1471 : /*
1472 : * delete from foreign table: let the FDW do it
1473 : *
1474 : * We offer the returning slot as a place to store RETURNING data,
1475 : * although the FDW can return some other slot if it wants.
1476 : */
1477 34 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1478 34 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1479 : resultRelInfo,
1480 : slot,
1481 : context->planSlot);
1482 :
1483 34 : if (slot == NULL) /* "do nothing" */
1484 0 : return NULL;
1485 :
1486 : /*
1487 : * RETURNING expressions might reference the tableoid column, so
1488 : * (re)initialize tts_tableOid before evaluating them.
1489 : */
1490 34 : if (TTS_EMPTY(slot))
1491 6 : ExecStoreAllNullTuple(slot);
1492 :
1493 34 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1494 : }
1495 : else
1496 : {
1497 : /*
1498 : * delete the tuple
1499 : *
1500 : * Note: if context->estate->es_crosscheck_snapshot isn't
1501 : * InvalidSnapshot, we check that the row to be deleted is visible to
1502 : * that snapshot, and throw a can't-serialize error if not. This is a
1503 : * special-case behavior needed for referential integrity updates in
1504 : * transaction-snapshot mode transactions.
1505 : */
1506 1639288 : ldelete:
1507 1639292 : result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1508 :
1509 1639256 : if (tmresult)
1510 960 : *tmresult = result;
1511 :
1512 1639256 : switch (result)
1513 : {
1514 30 : case TM_SelfModified:
1515 :
1516 : /*
1517 : * The target tuple was already updated or deleted by the
1518 : * current command, or by a later command in the current
1519 : * transaction. The former case is possible in a join DELETE
1520 : * where multiple tuples join to the same target tuple. This
1521 : * is somewhat questionable, but Postgres has always allowed
1522 : * it: we just ignore additional deletion attempts.
1523 : *
1524 : * The latter case arises if the tuple is modified by a
1525 : * command in a BEFORE trigger, or perhaps by a command in a
1526 : * volatile function used in the query. In such situations we
1527 : * should not ignore the deletion, but it is equally unsafe to
1528 : * proceed. We don't want to discard the original DELETE
1529 : * while keeping the triggered actions based on its deletion;
1530 : * and it would be no better to allow the original DELETE
1531 : * while discarding updates that it triggered. The row update
1532 : * carries some information that might be important according
1533 : * to business rules; so throwing an error is the only safe
1534 : * course.
1535 : *
1536 : * If a trigger actually intends this type of interaction, it
1537 : * can re-execute the DELETE and then return NULL to cancel
1538 : * the outer delete.
1539 : */
1540 30 : if (context->tmfd.cmax != estate->es_output_cid)
1541 6 : ereport(ERROR,
1542 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1543 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1544 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1545 :
1546 : /* Else, already deleted by self; nothing to do */
1547 24 : return NULL;
1548 :
1549 1639162 : case TM_Ok:
1550 1639162 : break;
1551 :
1552 58 : case TM_Updated:
1553 : {
1554 : TupleTableSlot *inputslot;
1555 : TupleTableSlot *epqslot;
1556 :
1557 58 : if (IsolationUsesXactSnapshot())
1558 2 : ereport(ERROR,
1559 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1560 : errmsg("could not serialize access due to concurrent update")));
1561 :
1562 : /*
1563 : * Already know that we're going to need to do EPQ, so
1564 : * fetch tuple directly into the right slot.
1565 : */
1566 56 : EvalPlanQualBegin(context->epqstate);
1567 56 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1568 : resultRelInfo->ri_RangeTableIndex);
1569 :
1570 56 : result = table_tuple_lock(resultRelationDesc, tupleid,
1571 : estate->es_snapshot,
1572 : inputslot, estate->es_output_cid,
1573 : LockTupleExclusive, LockWaitBlock,
1574 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1575 : &context->tmfd);
1576 :
1577 52 : switch (result)
1578 : {
1579 46 : case TM_Ok:
1580 : Assert(context->tmfd.traversed);
1581 46 : epqslot = EvalPlanQual(context->epqstate,
1582 : resultRelationDesc,
1583 : resultRelInfo->ri_RangeTableIndex,
1584 : inputslot);
1585 46 : if (TupIsNull(epqslot))
1586 : /* Tuple not passing quals anymore, exiting... */
1587 30 : return NULL;
1588 :
1589 : /*
1590 : * If requested, skip delete and pass back the
1591 : * updated row.
1592 : */
1593 16 : if (epqreturnslot)
1594 : {
1595 12 : *epqreturnslot = epqslot;
1596 12 : return NULL;
1597 : }
1598 : else
1599 4 : goto ldelete;
1600 :
1601 4 : case TM_SelfModified:
1602 :
1603 : /*
1604 : * This can be reached when following an update
1605 : * chain from a tuple updated by another session,
1606 : * reaching a tuple that was already updated in
1607 : * this transaction. If previously updated by this
1608 : * command, ignore the delete, otherwise error
1609 : * out.
1610 : *
1611 : * See also TM_SelfModified response to
1612 : * table_tuple_delete() above.
1613 : */
1614 4 : if (context->tmfd.cmax != estate->es_output_cid)
1615 2 : ereport(ERROR,
1616 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1617 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1618 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1619 2 : return NULL;
1620 :
1621 2 : case TM_Deleted:
1622 : /* tuple already deleted; nothing to do */
1623 2 : return NULL;
1624 :
1625 0 : default:
1626 :
1627 : /*
1628 : * TM_Invisible should be impossible because we're
1629 : * waiting for updated row versions, and would
1630 : * already have errored out if the first version
1631 : * is invisible.
1632 : *
1633 : * TM_Updated should be impossible, because we're
1634 : * locking the latest version via
1635 : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
1636 : */
1637 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
1638 : result);
1639 : return NULL;
1640 : }
1641 :
1642 : Assert(false);
1643 : break;
1644 : }
1645 :
1646 6 : case TM_Deleted:
1647 6 : if (IsolationUsesXactSnapshot())
1648 0 : ereport(ERROR,
1649 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1650 : errmsg("could not serialize access due to concurrent delete")));
1651 : /* tuple already deleted; nothing to do */
1652 6 : return NULL;
1653 :
1654 0 : default:
1655 0 : elog(ERROR, "unrecognized table_tuple_delete status: %u",
1656 : result);
1657 : return NULL;
1658 : }
1659 :
1660 : /*
1661 : * Note: Normally one would think that we have to delete index tuples
1662 : * associated with the heap tuple now...
1663 : *
1664 : * ... but in POSTGRES, we have no need to do this because VACUUM will
1665 : * take care of it later. We can't delete index tuples immediately
1666 : * anyway, since the tuple is still visible to other transactions.
1667 : */
1668 : }
1669 :
1670 1639244 : if (canSetTag)
1671 1638100 : (estate->es_processed)++;
1672 :
1673 : /* Tell caller that the delete actually happened. */
1674 1639244 : if (tupleDeleted)
1675 916 : *tupleDeleted = true;
1676 :
1677 1639244 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
1678 :
1679 : /* Process RETURNING if present and if requested */
1680 1639244 : if (processReturning && resultRelInfo->ri_projectReturning)
1681 : {
1682 : /*
1683 : * We have to put the target tuple into a slot, which means first we
1684 : * gotta fetch it. We can use the trigger tuple slot.
1685 : */
1686 : TupleTableSlot *rslot;
1687 :
1688 882 : if (resultRelInfo->ri_FdwRoutine)
1689 : {
1690 : /* FDW must have provided a slot containing the deleted row */
1691 : Assert(!TupIsNull(slot));
1692 : }
1693 : else
1694 : {
1695 876 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1696 876 : if (oldtuple != NULL)
1697 : {
1698 24 : ExecForceStoreHeapTuple(oldtuple, slot, false);
1699 : }
1700 : else
1701 : {
1702 852 : if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
1703 : SnapshotAny, slot))
1704 0 : elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1705 : }
1706 : }
1707 :
1708 882 : rslot = ExecProcessReturning(resultRelInfo, slot, context->planSlot);
1709 :
1710 : /*
1711 : * Before releasing the target tuple again, make sure rslot has a
1712 : * local copy of any pass-by-reference values.
1713 : */
1714 882 : ExecMaterializeSlot(rslot);
1715 :
1716 882 : ExecClearTuple(slot);
1717 :
1718 882 : return rslot;
1719 : }
1720 :
1721 1638362 : return NULL;
1722 : }
1723 :
1724 : /*
1725 : * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
1726 : *
1727 : * This works by first deleting the old tuple from the current partition,
1728 : * followed by inserting the new tuple into the root parent table, that is,
1729 : * mtstate->rootResultRelInfo. It will be re-routed from there to the
1730 : * correct partition.
1731 : *
1732 : * Returns true if the tuple has been successfully moved, or if it's found
1733 : * that the tuple was concurrently deleted so there's nothing more to do
1734 : * for the caller.
1735 : *
1736 : * False is returned if the tuple we're trying to move is found to have been
1737 : * concurrently updated. In that case, the caller must check if the updated
1738 : * tuple that's returned in *retry_slot still needs to be re-routed, and call
1739 : * this function again or perform a regular update accordingly. For MERGE,
1740 : * the updated tuple is not returned in *retry_slot; it has its own retry
1741 : * logic.
1742 : */
1743 : static bool
1744 1042 : ExecCrossPartitionUpdate(ModifyTableContext *context,
1745 : ResultRelInfo *resultRelInfo,
1746 : ItemPointer tupleid, HeapTuple oldtuple,
1747 : TupleTableSlot *slot,
1748 : bool canSetTag,
1749 : UpdateContext *updateCxt,
1750 : TM_Result *tmresult,
1751 : TupleTableSlot **retry_slot,
1752 : TupleTableSlot **inserted_tuple,
1753 : ResultRelInfo **insert_destrel)
1754 : {
1755 1042 : ModifyTableState *mtstate = context->mtstate;
1756 1042 : EState *estate = mtstate->ps.state;
1757 : TupleConversionMap *tupconv_map;
1758 : bool tuple_deleted;
1759 1042 : TupleTableSlot *epqslot = NULL;
1760 :
1761 1042 : context->cpUpdateReturningSlot = NULL;
1762 1042 : *retry_slot = NULL;
1763 :
1764 : /*
1765 : * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
1766 : * to migrate to a different partition. Maybe this can be implemented
1767 : * some day, but it seems a fringe feature with little redeeming value.
1768 : */
1769 1042 : if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
1770 0 : ereport(ERROR,
1771 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1772 : errmsg("invalid ON UPDATE specification"),
1773 : errdetail("The result tuple would appear in a different partition than the original tuple.")));
1774 :
1775 : /*
1776 : * When an UPDATE is run directly on a leaf partition, simply fail with a
1777 : * partition constraint violation error.
1778 : */
1779 1042 : if (resultRelInfo == mtstate->rootResultRelInfo)
1780 48 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1781 :
1782 : /* Initialize tuple routing info if not already done. */
1783 994 : if (mtstate->mt_partition_tuple_routing == NULL)
1784 : {
1785 614 : Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
1786 : MemoryContext oldcxt;
1787 :
1788 : /* Things built here have to last for the query duration. */
1789 614 : oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1790 :
1791 614 : mtstate->mt_partition_tuple_routing =
1792 614 : ExecSetupPartitionTupleRouting(estate, rootRel);
1793 :
1794 : /*
1795 : * Before a partition's tuple can be re-routed, it must first be
1796 : * converted to the root's format, so we'll need a slot for storing
1797 : * such tuples.
1798 : */
1799 : Assert(mtstate->mt_root_tuple_slot == NULL);
1800 614 : mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
1801 :
1802 614 : MemoryContextSwitchTo(oldcxt);
1803 : }
1804 :
1805 : /*
1806 : * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
1807 : * We want to return rows from INSERT.
1808 : */
1809 994 : ExecDelete(context, resultRelInfo,
1810 : tupleid, oldtuple,
1811 : false, /* processReturning */
1812 : true, /* changingPart */
1813 : false, /* canSetTag */
1814 : tmresult, &tuple_deleted, &epqslot);
1815 :
1816 : /*
1817 : * For some reason if DELETE didn't happen (e.g. trigger prevented it, or
1818 : * it was already deleted by self, or it was concurrently deleted by
1819 : * another transaction), then we should skip the insert as well;
1820 : * otherwise, an UPDATE could cause an increase in the total number of
1821 : * rows across all partitions, which is clearly wrong.
1822 : *
1823 : * For a normal UPDATE, the case where the tuple has been the subject of a
1824 : * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
1825 : * machinery, but for an UPDATE that we've translated into a DELETE from
1826 : * this partition and an INSERT into some other partition, that's not
1827 : * available, because CTID chains can't span relation boundaries. We
1828 : * mimic the semantics to a limited extent by skipping the INSERT if the
1829 : * DELETE fails to find a tuple. This ensures that two concurrent
1830 : * attempts to UPDATE the same tuple at the same time can't turn one tuple
1831 : * into two, and that an UPDATE of a just-deleted tuple can't resurrect
1832 : * it.
1833 : */
1834 992 : if (!tuple_deleted)
1835 : {
1836 : /*
1837 : * epqslot will be typically NULL. But when ExecDelete() finds that
1838 : * another transaction has concurrently updated the same row, it
1839 : * re-fetches the row, skips the delete, and epqslot is set to the
1840 : * re-fetched tuple slot. In that case, we need to do all the checks
1841 : * again. For MERGE, we leave everything to the caller (it must do
1842 : * additional rechecking, and might end up executing a different
1843 : * action entirely).
1844 : */
1845 76 : if (mtstate->operation == CMD_MERGE)
1846 34 : return *tmresult == TM_Ok;
1847 42 : else if (TupIsNull(epqslot))
1848 36 : return true;
1849 : else
1850 : {
1851 : /* Fetch the most recent version of old tuple. */
1852 : TupleTableSlot *oldSlot;
1853 :
1854 : /* ... but first, make sure ri_oldTupleSlot is initialized. */
1855 6 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
1856 0 : ExecInitUpdateProjection(mtstate, resultRelInfo);
1857 6 : oldSlot = resultRelInfo->ri_oldTupleSlot;
1858 6 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
1859 : tupleid,
1860 : SnapshotAny,
1861 : oldSlot))
1862 0 : elog(ERROR, "failed to fetch tuple being updated");
1863 : /* and project the new tuple to retry the UPDATE with */
1864 6 : *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
1865 : oldSlot);
1866 6 : return false;
1867 : }
1868 : }
1869 :
1870 : /*
1871 : * resultRelInfo is one of the per-relation resultRelInfos. So we should
1872 : * convert the tuple into root's tuple descriptor if needed, since
1873 : * ExecInsert() starts the search from root.
1874 : */
1875 916 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1876 916 : if (tupconv_map != NULL)
1877 302 : slot = execute_attr_map_slot(tupconv_map->attrMap,
1878 : slot,
1879 : mtstate->mt_root_tuple_slot);
1880 :
1881 : /* Tuple routing starts from the root table. */
1882 788 : context->cpUpdateReturningSlot =
1883 916 : ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
1884 : inserted_tuple, insert_destrel);
1885 :
1886 : /*
1887 : * Reset the transition state that may possibly have been written by
1888 : * INSERT.
1889 : */
1890 788 : if (mtstate->mt_transition_capture)
1891 42 : mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
1892 :
1893 : /* We're done moving. */
1894 788 : return true;
1895 : }
1896 :
1897 : /*
1898 : * ExecUpdatePrologue -- subroutine for ExecUpdate
1899 : *
1900 : * Prepare executor state for UPDATE. This includes running BEFORE ROW
1901 : * triggers. We return false if one of them makes the update a no-op;
1902 : * otherwise, return true.
1903 : */
1904 : static bool
1905 316110 : ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1906 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
1907 : TM_Result *result)
1908 : {
1909 316110 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1910 :
1911 316110 : if (result)
1912 2128 : *result = TM_Ok;
1913 :
1914 316110 : ExecMaterializeSlot(slot);
1915 :
1916 : /*
1917 : * Open the table's indexes, if we have not done so already, so that we
1918 : * can add new index entries for the updated tuple.
1919 : */
1920 316110 : if (resultRelationDesc->rd_rel->relhasindex &&
1921 225500 : resultRelInfo->ri_IndexRelationDescs == NULL)
1922 8194 : ExecOpenIndices(resultRelInfo, false);
1923 :
1924 : /* BEFORE ROW UPDATE triggers */
1925 316110 : if (resultRelInfo->ri_TrigDesc &&
1926 6262 : resultRelInfo->ri_TrigDesc->trig_update_before_row)
1927 : {
1928 : /* Flush any pending inserts, so rows are visible to the triggers */
1929 2572 : if (context->estate->es_insert_pending_result_relations != NIL)
1930 2 : ExecPendingInserts(context->estate);
1931 :
1932 2572 : return ExecBRUpdateTriggers(context->estate, context->epqstate,
1933 : resultRelInfo, tupleid, oldtuple, slot,
1934 : result, &context->tmfd);
1935 : }
1936 :
1937 313538 : return true;
1938 : }
1939 :
1940 : /*
1941 : * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
1942 : *
1943 : * Apply the final modifications to the tuple slot before the update.
1944 : * (This is split out because we also need it in the foreign-table code path.)
1945 : */
1946 : static void
1947 315832 : ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
1948 : TupleTableSlot *slot,
1949 : EState *estate)
1950 : {
1951 315832 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1952 :
1953 : /*
1954 : * Constraints and GENERATED expressions might reference the tableoid
1955 : * column, so (re-)initialize tts_tableOid before evaluating them.
1956 : */
1957 315832 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1958 :
1959 : /*
1960 : * Compute stored generated columns
1961 : */
1962 315832 : if (resultRelationDesc->rd_att->constr &&
1963 188554 : resultRelationDesc->rd_att->constr->has_generated_stored)
1964 260 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
1965 : CMD_UPDATE);
1966 315832 : }
1967 :
1968 : /*
1969 : * ExecUpdateAct -- subroutine for ExecUpdate
1970 : *
1971 : * Actually update the tuple, when operating on a plain table. If the
1972 : * table is a partition, and the command was called referencing an ancestor
1973 : * partitioned table, this routine migrates the resulting tuple to another
1974 : * partition.
1975 : *
1976 : * The caller is in charge of keeping indexes current as necessary. The
1977 : * caller is also in charge of doing EvalPlanQual if the tuple is found to
1978 : * be concurrently updated. However, in case of a cross-partition update,
1979 : * this routine does it.
1980 : */
1981 : static TM_Result
1982 315680 : ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1983 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
1984 : bool canSetTag, UpdateContext *updateCxt)
1985 : {
1986 315680 : EState *estate = context->estate;
1987 315680 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1988 : bool partition_constraint_failed;
1989 : TM_Result result;
1990 :
1991 315680 : updateCxt->crossPartUpdate = false;
1992 :
1993 : /*
1994 : * If we move the tuple to a new partition, we loop back here to recompute
1995 : * GENERATED values (which are allowed to be different across partitions)
1996 : * and recheck any RLS policies and constraints. We do not fire any
1997 : * BEFORE triggers of the new partition, however.
1998 : */
1999 315686 : lreplace:
2000 : /* Fill in GENERATEd columns */
2001 315686 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2002 :
2003 : /* ensure slot is independent, consider e.g. EPQ */
2004 315686 : ExecMaterializeSlot(slot);
2005 :
2006 : /*
2007 : * If partition constraint fails, this row might get moved to another
2008 : * partition, in which case we should check the RLS CHECK policy just
2009 : * before inserting into the new partition, rather than doing it here.
2010 : * This is because a trigger on that partition might again change the row.
2011 : * So skip the WCO checks if the partition constraint fails.
2012 : */
2013 315686 : partition_constraint_failed =
2014 318376 : resultRelationDesc->rd_rel->relispartition &&
2015 2690 : !ExecPartitionCheck(resultRelInfo, slot, estate, false);
2016 :
2017 : /* Check any RLS UPDATE WITH CHECK policies */
2018 315686 : if (!partition_constraint_failed &&
2019 314644 : resultRelInfo->ri_WithCheckOptions != NIL)
2020 : {
2021 : /*
2022 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
2023 : * we are looking for at this point.
2024 : */
2025 480 : ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
2026 : resultRelInfo, slot, estate);
2027 : }
2028 :
2029 : /*
2030 : * If a partition check failed, try to move the row into the right
2031 : * partition.
2032 : */
2033 315632 : if (partition_constraint_failed)
2034 : {
2035 : TupleTableSlot *inserted_tuple,
2036 : *retry_slot;
2037 1042 : ResultRelInfo *insert_destrel = NULL;
2038 :
2039 : /*
2040 : * ExecCrossPartitionUpdate will first DELETE the row from the
2041 : * partition it's currently in and then insert it back into the root
2042 : * table, which will re-route it to the correct partition. However,
2043 : * if the tuple has been concurrently updated, a retry is needed.
2044 : */
2045 1042 : if (ExecCrossPartitionUpdate(context, resultRelInfo,
2046 : tupleid, oldtuple, slot,
2047 : canSetTag, updateCxt,
2048 : &result,
2049 : &retry_slot,
2050 : &inserted_tuple,
2051 : &insert_destrel))
2052 : {
2053 : /* success! */
2054 848 : updateCxt->crossPartUpdate = true;
2055 :
2056 : /*
2057 : * If the partitioned table being updated is referenced in foreign
2058 : * keys, queue up trigger events to check that none of them were
2059 : * violated. No special treatment is needed in
2060 : * non-cross-partition update situations, because the leaf
2061 : * partition's AR update triggers will take care of that. During
2062 : * cross-partition updates implemented as delete on the source
2063 : * partition followed by insert on the destination partition,
2064 : * AR-UPDATE triggers of the root table (that is, the table
2065 : * mentioned in the query) must be fired.
2066 : *
2067 : * NULL insert_destrel means that the move failed to occur, that
2068 : * is, the update failed, so no need to anything in that case.
2069 : */
2070 848 : if (insert_destrel &&
2071 760 : resultRelInfo->ri_TrigDesc &&
2072 362 : resultRelInfo->ri_TrigDesc->trig_update_after_row)
2073 300 : ExecCrossPartitionUpdateForeignKey(context,
2074 : resultRelInfo,
2075 : insert_destrel,
2076 : tupleid, slot,
2077 : inserted_tuple);
2078 :
2079 852 : return TM_Ok;
2080 : }
2081 :
2082 : /*
2083 : * No luck, a retry is needed. If running MERGE, we do not do so
2084 : * here; instead let it handle that on its own rules.
2085 : */
2086 16 : if (context->mtstate->operation == CMD_MERGE)
2087 10 : return result;
2088 :
2089 : /*
2090 : * ExecCrossPartitionUpdate installed an updated version of the new
2091 : * tuple in the retry slot; start over.
2092 : */
2093 6 : slot = retry_slot;
2094 6 : goto lreplace;
2095 : }
2096 :
2097 : /*
2098 : * Check the constraints of the tuple. We've already checked the
2099 : * partition constraint above; however, we must still ensure the tuple
2100 : * passes all other constraints, so we will call ExecConstraints() and
2101 : * have it validate all remaining checks.
2102 : */
2103 314590 : if (resultRelationDesc->rd_att->constr)
2104 187976 : ExecConstraints(resultRelInfo, slot, estate);
2105 :
2106 : /*
2107 : * replace the heap tuple
2108 : *
2109 : * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
2110 : * the row to be updated is visible to that snapshot, and throw a
2111 : * can't-serialize error if not. This is a special-case behavior needed
2112 : * for referential integrity updates in transaction-snapshot mode
2113 : * transactions.
2114 : */
2115 314516 : result = table_tuple_update(resultRelationDesc, tupleid, slot,
2116 : estate->es_output_cid,
2117 : estate->es_snapshot,
2118 : estate->es_crosscheck_snapshot,
2119 : true /* wait for commit */ ,
2120 : &context->tmfd, &updateCxt->lockmode,
2121 : &updateCxt->updateIndexes);
2122 :
2123 314492 : return result;
2124 : }
2125 :
2126 : /*
2127 : * ExecUpdateEpilogue -- subroutine for ExecUpdate
2128 : *
2129 : * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2130 : * returns indicating that the tuple was updated.
2131 : */
2132 : static void
2133 314498 : ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
2134 : ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2135 : HeapTuple oldtuple, TupleTableSlot *slot)
2136 : {
2137 314498 : ModifyTableState *mtstate = context->mtstate;
2138 314498 : List *recheckIndexes = NIL;
2139 :
2140 : /* insert index entries for tuple if necessary */
2141 314498 : if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
2142 170664 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
2143 : slot, context->estate,
2144 : true, false,
2145 : NULL, NIL,
2146 170664 : (updateCxt->updateIndexes == TU_Summarizing));
2147 :
2148 : /* AFTER ROW UPDATE Triggers */
2149 314408 : ExecARUpdateTriggers(context->estate, resultRelInfo,
2150 : NULL, NULL,
2151 : tupleid, oldtuple, slot,
2152 : recheckIndexes,
2153 314408 : mtstate->operation == CMD_INSERT ?
2154 : mtstate->mt_oc_transition_capture :
2155 : mtstate->mt_transition_capture,
2156 : false);
2157 :
2158 314408 : list_free(recheckIndexes);
2159 :
2160 : /*
2161 : * Check any WITH CHECK OPTION constraints from parent views. We are
2162 : * required to do this after testing all constraints and uniqueness
2163 : * violations per the SQL spec, so we do it after actually updating the
2164 : * record in the heap and all indexes.
2165 : *
2166 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2167 : * are looking for at this point.
2168 : */
2169 314408 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2170 454 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2171 : slot, context->estate);
2172 314332 : }
2173 :
2174 : /*
2175 : * Queues up an update event using the target root partitioned table's
2176 : * trigger to check that a cross-partition update hasn't broken any foreign
2177 : * keys pointing into it.
2178 : */
2179 : static void
2180 300 : ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
2181 : ResultRelInfo *sourcePartInfo,
2182 : ResultRelInfo *destPartInfo,
2183 : ItemPointer tupleid,
2184 : TupleTableSlot *oldslot,
2185 : TupleTableSlot *newslot)
2186 : {
2187 : ListCell *lc;
2188 : ResultRelInfo *rootRelInfo;
2189 : List *ancestorRels;
2190 :
2191 300 : rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2192 300 : ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2193 :
2194 : /*
2195 : * For any foreign keys that point directly into a non-root ancestors of
2196 : * the source partition, we can in theory fire an update event to enforce
2197 : * those constraints using their triggers, if we could tell that both the
2198 : * source and the destination partitions are under the same ancestor. But
2199 : * for now, we simply report an error that those cannot be enforced.
2200 : */
2201 654 : foreach(lc, ancestorRels)
2202 : {
2203 360 : ResultRelInfo *rInfo = lfirst(lc);
2204 360 : TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2205 360 : bool has_noncloned_fkey = false;
2206 :
2207 : /* Root ancestor's triggers will be processed. */
2208 360 : if (rInfo == rootRelInfo)
2209 294 : continue;
2210 :
2211 66 : if (trigdesc && trigdesc->trig_update_after_row)
2212 : {
2213 228 : for (int i = 0; i < trigdesc->numtriggers; i++)
2214 : {
2215 168 : Trigger *trig = &trigdesc->triggers[i];
2216 :
2217 174 : if (!trig->tgisclone &&
2218 6 : RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
2219 : {
2220 6 : has_noncloned_fkey = true;
2221 6 : break;
2222 : }
2223 : }
2224 : }
2225 :
2226 66 : if (has_noncloned_fkey)
2227 6 : ereport(ERROR,
2228 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2229 : errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2230 : errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2231 : RelationGetRelationName(rInfo->ri_RelationDesc),
2232 : RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
2233 : errhint("Consider defining the foreign key on table \"%s\".",
2234 : RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
2235 : }
2236 :
2237 : /* Perform the root table's triggers. */
2238 294 : ExecARUpdateTriggers(context->estate,
2239 : rootRelInfo, sourcePartInfo, destPartInfo,
2240 : tupleid, NULL, newslot, NIL, NULL, true);
2241 294 : }
2242 :
2243 : /* ----------------------------------------------------------------
2244 : * ExecUpdate
2245 : *
2246 : * note: we can't run UPDATE queries with transactions
2247 : * off because UPDATEs are actually INSERTs and our
2248 : * scan will mistakenly loop forever, updating the tuple
2249 : * it just inserted.. This should be fixed but until it
2250 : * is, we don't want to get stuck in an infinite loop
2251 : * which corrupts your database..
2252 : *
2253 : * When updating a table, tupleid identifies the tuple to update and
2254 : * oldtuple is NULL. When updating through a view INSTEAD OF trigger,
2255 : * oldtuple is passed to the triggers and identifies what to update, and
2256 : * tupleid is invalid. When updating a foreign table, tupleid is
2257 : * invalid; the FDW has to figure out which row to update using data from
2258 : * the planSlot. oldtuple is passed to foreign table triggers; it is
2259 : * NULL when the foreign table has no relevant triggers.
2260 : *
2261 : * slot contains the new tuple value to be stored.
2262 : * planSlot is the output of the ModifyTable's subplan; we use it
2263 : * to access values from other input tables (for RETURNING),
2264 : * row-ID junk columns, etc.
2265 : *
2266 : * Returns RETURNING result if any, otherwise NULL. On exit, if tupleid
2267 : * had identified the tuple to update, it will identify the tuple
2268 : * actually updated after EvalPlanQual.
2269 : * ----------------------------------------------------------------
2270 : */
2271 : static TupleTableSlot *
2272 313982 : ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2273 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2274 : bool canSetTag)
2275 : {
2276 313982 : EState *estate = context->estate;
2277 313982 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2278 313982 : UpdateContext updateCxt = {0};
2279 : TM_Result result;
2280 :
2281 : /*
2282 : * abort the operation if not running transactions
2283 : */
2284 313982 : if (IsBootstrapProcessingMode())
2285 0 : elog(ERROR, "cannot UPDATE during bootstrap");
2286 :
2287 : /*
2288 : * Prepare for the update. This includes BEFORE ROW triggers, so we're
2289 : * done if it says we are.
2290 : */
2291 313982 : if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
2292 138 : return NULL;
2293 :
2294 : /* INSTEAD OF ROW UPDATE Triggers */
2295 313808 : if (resultRelInfo->ri_TrigDesc &&
2296 5736 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2297 : {
2298 114 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2299 : oldtuple, slot))
2300 18 : return NULL; /* "do nothing" */
2301 : }
2302 313694 : else if (resultRelInfo->ri_FdwRoutine)
2303 : {
2304 : /* Fill in GENERATEd columns */
2305 146 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2306 :
2307 : /*
2308 : * update in foreign table: let the FDW do it
2309 : */
2310 146 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2311 : resultRelInfo,
2312 : slot,
2313 : context->planSlot);
2314 :
2315 146 : if (slot == NULL) /* "do nothing" */
2316 2 : return NULL;
2317 :
2318 : /*
2319 : * AFTER ROW Triggers or RETURNING expressions might reference the
2320 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
2321 : * them. (This covers the case where the FDW replaced the slot.)
2322 : */
2323 144 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2324 : }
2325 : else
2326 : {
2327 : ItemPointerData lockedtid;
2328 :
2329 : /*
2330 : * If we generate a new candidate tuple after EvalPlanQual testing, we
2331 : * must loop back here to try again. (We don't need to redo triggers,
2332 : * however. If there are any BEFORE triggers then trigger.c will have
2333 : * done table_tuple_lock to lock the correct tuple, so there's no need
2334 : * to do them again.)
2335 : */
2336 313548 : redo_act:
2337 313648 : lockedtid = *tupleid;
2338 313648 : result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2339 : canSetTag, &updateCxt);
2340 :
2341 : /*
2342 : * If ExecUpdateAct reports that a cross-partition update was done,
2343 : * then the RETURNING tuple (if any) has been projected and there's
2344 : * nothing else for us to do.
2345 : */
2346 313332 : if (updateCxt.crossPartUpdate)
2347 836 : return context->cpUpdateReturningSlot;
2348 :
2349 312624 : switch (result)
2350 : {
2351 84 : case TM_SelfModified:
2352 :
2353 : /*
2354 : * The target tuple was already updated or deleted by the
2355 : * current command, or by a later command in the current
2356 : * transaction. The former case is possible in a join UPDATE
2357 : * where multiple tuples join to the same target tuple. This
2358 : * is pretty questionable, but Postgres has always allowed it:
2359 : * we just execute the first update action and ignore
2360 : * additional update attempts.
2361 : *
2362 : * The latter case arises if the tuple is modified by a
2363 : * command in a BEFORE trigger, or perhaps by a command in a
2364 : * volatile function used in the query. In such situations we
2365 : * should not ignore the update, but it is equally unsafe to
2366 : * proceed. We don't want to discard the original UPDATE
2367 : * while keeping the triggered actions based on it; and we
2368 : * have no principled way to merge this update with the
2369 : * previous ones. So throwing an error is the only safe
2370 : * course.
2371 : *
2372 : * If a trigger actually intends this type of interaction, it
2373 : * can re-execute the UPDATE (assuming it can figure out how)
2374 : * and then return NULL to cancel the outer update.
2375 : */
2376 84 : if (context->tmfd.cmax != estate->es_output_cid)
2377 6 : ereport(ERROR,
2378 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2379 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2380 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2381 :
2382 : /* Else, already updated by self; nothing to do */
2383 78 : return NULL;
2384 :
2385 312380 : case TM_Ok:
2386 312380 : break;
2387 :
2388 152 : case TM_Updated:
2389 : {
2390 : TupleTableSlot *inputslot;
2391 : TupleTableSlot *epqslot;
2392 : TupleTableSlot *oldSlot;
2393 :
2394 152 : if (IsolationUsesXactSnapshot())
2395 4 : ereport(ERROR,
2396 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2397 : errmsg("could not serialize access due to concurrent update")));
2398 :
2399 : /*
2400 : * Already know that we're going to need to do EPQ, so
2401 : * fetch tuple directly into the right slot.
2402 : */
2403 148 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2404 : resultRelInfo->ri_RangeTableIndex);
2405 :
2406 148 : result = table_tuple_lock(resultRelationDesc, tupleid,
2407 : estate->es_snapshot,
2408 : inputslot, estate->es_output_cid,
2409 : updateCxt.lockmode, LockWaitBlock,
2410 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2411 : &context->tmfd);
2412 :
2413 144 : switch (result)
2414 : {
2415 134 : case TM_Ok:
2416 : Assert(context->tmfd.traversed);
2417 :
2418 134 : epqslot = EvalPlanQual(context->epqstate,
2419 : resultRelationDesc,
2420 : resultRelInfo->ri_RangeTableIndex,
2421 : inputslot);
2422 134 : if (TupIsNull(epqslot))
2423 : /* Tuple not passing quals anymore, exiting... */
2424 34 : return NULL;
2425 :
2426 : /* Make sure ri_oldTupleSlot is initialized. */
2427 100 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2428 0 : ExecInitUpdateProjection(context->mtstate,
2429 : resultRelInfo);
2430 :
2431 100 : if (resultRelInfo->ri_needLockTagTuple)
2432 : {
2433 2 : UnlockTuple(resultRelationDesc,
2434 : &lockedtid, InplaceUpdateTupleLock);
2435 2 : LockTuple(resultRelationDesc,
2436 : tupleid, InplaceUpdateTupleLock);
2437 : }
2438 :
2439 : /* Fetch the most recent version of old tuple. */
2440 100 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2441 100 : if (!table_tuple_fetch_row_version(resultRelationDesc,
2442 : tupleid,
2443 : SnapshotAny,
2444 : oldSlot))
2445 0 : elog(ERROR, "failed to fetch tuple being updated");
2446 100 : slot = ExecGetUpdateNewTuple(resultRelInfo,
2447 : epqslot, oldSlot);
2448 100 : goto redo_act;
2449 :
2450 2 : case TM_Deleted:
2451 : /* tuple already deleted; nothing to do */
2452 2 : return NULL;
2453 :
2454 8 : case TM_SelfModified:
2455 :
2456 : /*
2457 : * This can be reached when following an update
2458 : * chain from a tuple updated by another session,
2459 : * reaching a tuple that was already updated in
2460 : * this transaction. If previously modified by
2461 : * this command, ignore the redundant update,
2462 : * otherwise error out.
2463 : *
2464 : * See also TM_SelfModified response to
2465 : * table_tuple_update() above.
2466 : */
2467 8 : if (context->tmfd.cmax != estate->es_output_cid)
2468 2 : ereport(ERROR,
2469 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2470 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2471 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2472 6 : return NULL;
2473 :
2474 0 : default:
2475 : /* see table_tuple_lock call in ExecDelete() */
2476 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2477 : result);
2478 : return NULL;
2479 : }
2480 : }
2481 :
2482 : break;
2483 :
2484 8 : case TM_Deleted:
2485 8 : if (IsolationUsesXactSnapshot())
2486 0 : ereport(ERROR,
2487 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2488 : errmsg("could not serialize access due to concurrent delete")));
2489 : /* tuple already deleted; nothing to do */
2490 8 : return NULL;
2491 :
2492 0 : default:
2493 0 : elog(ERROR, "unrecognized table_tuple_update status: %u",
2494 : result);
2495 : return NULL;
2496 : }
2497 : }
2498 :
2499 312614 : if (canSetTag)
2500 312022 : (estate->es_processed)++;
2501 :
2502 312614 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2503 : slot);
2504 :
2505 : /* Process RETURNING if present */
2506 312460 : if (resultRelInfo->ri_projectReturning)
2507 2158 : return ExecProcessReturning(resultRelInfo, slot, context->planSlot);
2508 :
2509 310302 : return NULL;
2510 : }
2511 :
2512 : /*
2513 : * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
2514 : *
2515 : * Try to lock tuple for update as part of speculative insertion. If
2516 : * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
2517 : * (but still lock row, even though it may not satisfy estate's
2518 : * snapshot).
2519 : *
2520 : * Returns true if we're done (with or without an update), or false if
2521 : * the caller must retry the INSERT from scratch.
2522 : */
2523 : static bool
2524 5200 : ExecOnConflictUpdate(ModifyTableContext *context,
2525 : ResultRelInfo *resultRelInfo,
2526 : ItemPointer conflictTid,
2527 : TupleTableSlot *excludedSlot,
2528 : bool canSetTag,
2529 : TupleTableSlot **returning)
2530 : {
2531 5200 : ModifyTableState *mtstate = context->mtstate;
2532 5200 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2533 5200 : Relation relation = resultRelInfo->ri_RelationDesc;
2534 5200 : ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
2535 5200 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
2536 : TM_FailureData tmfd;
2537 : LockTupleMode lockmode;
2538 : TM_Result test;
2539 : Datum xminDatum;
2540 : TransactionId xmin;
2541 : bool isnull;
2542 :
2543 : /*
2544 : * Parse analysis should have blocked ON CONFLICT for all system
2545 : * relations, which includes these. There's no fundamental obstacle to
2546 : * supporting this; we'd just need to handle LOCKTAG_TUPLE like the other
2547 : * ExecUpdate() caller.
2548 : */
2549 : Assert(!resultRelInfo->ri_needLockTagTuple);
2550 :
2551 : /* Determine lock mode to use */
2552 5200 : lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
2553 :
2554 : /*
2555 : * Lock tuple for update. Don't follow updates when tuple cannot be
2556 : * locked without doing so. A row locking conflict here means our
2557 : * previous conclusion that the tuple is conclusively committed is not
2558 : * true anymore.
2559 : */
2560 5200 : test = table_tuple_lock(relation, conflictTid,
2561 5200 : context->estate->es_snapshot,
2562 5200 : existing, context->estate->es_output_cid,
2563 : lockmode, LockWaitBlock, 0,
2564 : &tmfd);
2565 5200 : switch (test)
2566 : {
2567 5176 : case TM_Ok:
2568 : /* success! */
2569 5176 : break;
2570 :
2571 24 : case TM_Invisible:
2572 :
2573 : /*
2574 : * This can occur when a just inserted tuple is updated again in
2575 : * the same command. E.g. because multiple rows with the same
2576 : * conflicting key values are inserted.
2577 : *
2578 : * This is somewhat similar to the ExecUpdate() TM_SelfModified
2579 : * case. We do not want to proceed because it would lead to the
2580 : * same row being updated a second time in some unspecified order,
2581 : * and in contrast to plain UPDATEs there's no historical behavior
2582 : * to break.
2583 : *
2584 : * It is the user's responsibility to prevent this situation from
2585 : * occurring. These problems are why the SQL standard similarly
2586 : * specifies that for SQL MERGE, an exception must be raised in
2587 : * the event of an attempt to update the same row twice.
2588 : */
2589 24 : xminDatum = slot_getsysattr(existing,
2590 : MinTransactionIdAttributeNumber,
2591 : &isnull);
2592 : Assert(!isnull);
2593 24 : xmin = DatumGetTransactionId(xminDatum);
2594 :
2595 24 : if (TransactionIdIsCurrentTransactionId(xmin))
2596 24 : ereport(ERROR,
2597 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2598 : /* translator: %s is a SQL command name */
2599 : errmsg("%s command cannot affect row a second time",
2600 : "ON CONFLICT DO UPDATE"),
2601 : errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
2602 :
2603 : /* This shouldn't happen */
2604 0 : elog(ERROR, "attempted to lock invisible tuple");
2605 : break;
2606 :
2607 0 : case TM_SelfModified:
2608 :
2609 : /*
2610 : * This state should never be reached. As a dirty snapshot is used
2611 : * to find conflicting tuples, speculative insertion wouldn't have
2612 : * seen this row to conflict with.
2613 : */
2614 0 : elog(ERROR, "unexpected self-updated tuple");
2615 : break;
2616 :
2617 0 : case TM_Updated:
2618 0 : if (IsolationUsesXactSnapshot())
2619 0 : ereport(ERROR,
2620 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2621 : errmsg("could not serialize access due to concurrent update")));
2622 :
2623 : /*
2624 : * As long as we don't support an UPDATE of INSERT ON CONFLICT for
2625 : * a partitioned table we shouldn't reach to a case where tuple to
2626 : * be lock is moved to another partition due to concurrent update
2627 : * of the partition key.
2628 : */
2629 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2630 :
2631 : /*
2632 : * Tell caller to try again from the very start.
2633 : *
2634 : * It does not make sense to use the usual EvalPlanQual() style
2635 : * loop here, as the new version of the row might not conflict
2636 : * anymore, or the conflicting tuple has actually been deleted.
2637 : */
2638 0 : ExecClearTuple(existing);
2639 0 : return false;
2640 :
2641 0 : case TM_Deleted:
2642 0 : if (IsolationUsesXactSnapshot())
2643 0 : ereport(ERROR,
2644 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2645 : errmsg("could not serialize access due to concurrent delete")));
2646 :
2647 : /* see TM_Updated case */
2648 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2649 0 : ExecClearTuple(existing);
2650 0 : return false;
2651 :
2652 0 : default:
2653 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
2654 : }
2655 :
2656 : /* Success, the tuple is locked. */
2657 :
2658 : /*
2659 : * Verify that the tuple is visible to our MVCC snapshot if the current
2660 : * isolation level mandates that.
2661 : *
2662 : * It's not sufficient to rely on the check within ExecUpdate() as e.g.
2663 : * CONFLICT ... WHERE clause may prevent us from reaching that.
2664 : *
2665 : * This means we only ever continue when a new command in the current
2666 : * transaction could see the row, even though in READ COMMITTED mode the
2667 : * tuple will not be visible according to the current statement's
2668 : * snapshot. This is in line with the way UPDATE deals with newer tuple
2669 : * versions.
2670 : */
2671 5176 : ExecCheckTupleVisible(context->estate, relation, existing);
2672 :
2673 : /*
2674 : * Make tuple and any needed join variables available to ExecQual and
2675 : * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
2676 : * the target's existing tuple is installed in the scantuple. EXCLUDED
2677 : * has been made to reference INNER_VAR in setrefs.c, but there is no
2678 : * other redirection.
2679 : */
2680 5176 : econtext->ecxt_scantuple = existing;
2681 5176 : econtext->ecxt_innertuple = excludedSlot;
2682 5176 : econtext->ecxt_outertuple = NULL;
2683 :
2684 5176 : if (!ExecQual(onConflictSetWhere, econtext))
2685 : {
2686 32 : ExecClearTuple(existing); /* see return below */
2687 32 : InstrCountFiltered1(&mtstate->ps, 1);
2688 32 : return true; /* done with the tuple */
2689 : }
2690 :
2691 5144 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2692 : {
2693 : /*
2694 : * Check target's existing tuple against UPDATE-applicable USING
2695 : * security barrier quals (if any), enforced here as RLS checks/WCOs.
2696 : *
2697 : * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
2698 : * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
2699 : * but that's almost the extent of its special handling for ON
2700 : * CONFLICT DO UPDATE.
2701 : *
2702 : * The rewriter will also have associated UPDATE applicable straight
2703 : * RLS checks/WCOs for the benefit of the ExecUpdate() call that
2704 : * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
2705 : * kinds, so there is no danger of spurious over-enforcement in the
2706 : * INSERT or UPDATE path.
2707 : */
2708 60 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
2709 : existing,
2710 : mtstate->ps.state);
2711 : }
2712 :
2713 : /* Project the new tuple version */
2714 5120 : ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
2715 :
2716 : /*
2717 : * Note that it is possible that the target tuple has been modified in
2718 : * this session, after the above table_tuple_lock. We choose to not error
2719 : * out in that case, in line with ExecUpdate's treatment of similar cases.
2720 : * This can happen if an UPDATE is triggered from within ExecQual(),
2721 : * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
2722 : * wCTE in the ON CONFLICT's SET.
2723 : */
2724 :
2725 : /* Execute UPDATE with projection */
2726 10210 : *returning = ExecUpdate(context, resultRelInfo,
2727 : conflictTid, NULL,
2728 5120 : resultRelInfo->ri_onConflict->oc_ProjSlot,
2729 : canSetTag);
2730 :
2731 : /*
2732 : * Clear out existing tuple, as there might not be another conflict among
2733 : * the next input rows. Don't want to hold resources till the end of the
2734 : * query.
2735 : */
2736 5090 : ExecClearTuple(existing);
2737 5090 : return true;
2738 : }
2739 :
2740 : /*
2741 : * Perform MERGE.
2742 : */
2743 : static TupleTableSlot *
2744 13992 : ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2745 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
2746 : {
2747 13992 : TupleTableSlot *rslot = NULL;
2748 : bool matched;
2749 :
2750 : /*-----
2751 : * If we are dealing with a WHEN MATCHED case, tupleid or oldtuple is
2752 : * valid, depending on whether the result relation is a table or a view.
2753 : * We execute the first action for which the additional WHEN MATCHED AND
2754 : * quals pass. If an action without quals is found, that action is
2755 : * executed.
2756 : *
2757 : * Similarly, in the WHEN NOT MATCHED BY SOURCE case, tupleid or oldtuple
2758 : * is valid, and we look at the given WHEN NOT MATCHED BY SOURCE actions
2759 : * in sequence until one passes. This is almost identical to the WHEN
2760 : * MATCHED case, and both cases are handled by ExecMergeMatched().
2761 : *
2762 : * Finally, in the WHEN NOT MATCHED [BY TARGET] case, both tupleid and
2763 : * oldtuple are invalid, and we look at the given WHEN NOT MATCHED [BY
2764 : * TARGET] actions in sequence until one passes.
2765 : *
2766 : * Things get interesting in case of concurrent update/delete of the
2767 : * target tuple. Such concurrent update/delete is detected while we are
2768 : * executing a WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action.
2769 : *
2770 : * A concurrent update can:
2771 : *
2772 : * 1. modify the target tuple so that the results from checking any
2773 : * additional quals attached to WHEN MATCHED or WHEN NOT MATCHED BY
2774 : * SOURCE actions potentially change, but the result from the join
2775 : * quals does not change.
2776 : *
2777 : * In this case, we are still dealing with the same kind of match
2778 : * (MATCHED or NOT MATCHED BY SOURCE). We recheck the same list of
2779 : * actions from the start and choose the first one that satisfies the
2780 : * new target tuple.
2781 : *
2782 : * 2. modify the target tuple in the WHEN MATCHED case so that the join
2783 : * quals no longer pass and hence the source and target tuples no
2784 : * longer match.
2785 : *
2786 : * In this case, we are now dealing with a NOT MATCHED case, and we
2787 : * process both WHEN NOT MATCHED BY SOURCE and WHEN NOT MATCHED [BY
2788 : * TARGET] actions. First ExecMergeMatched() processes the list of
2789 : * WHEN NOT MATCHED BY SOURCE actions in sequence until one passes,
2790 : * then ExecMergeNotMatched() processes any WHEN NOT MATCHED [BY
2791 : * TARGET] actions in sequence until one passes. Thus we may execute
2792 : * two actions; one of each kind.
2793 : *
2794 : * Thus we support concurrent updates that turn MATCHED candidate rows
2795 : * into NOT MATCHED rows. However, we do not attempt to support cases
2796 : * that would turn NOT MATCHED rows into MATCHED rows, or which would
2797 : * cause a target row to match a different source row.
2798 : *
2799 : * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED
2800 : * [BY TARGET].
2801 : *
2802 : * ExecMergeMatched() takes care of following the update chain and
2803 : * re-finding the qualifying WHEN MATCHED or WHEN NOT MATCHED BY SOURCE
2804 : * action, as long as the target tuple still exists. If the target tuple
2805 : * gets deleted or a concurrent update causes the join quals to fail, it
2806 : * returns a matched status of false and we call ExecMergeNotMatched().
2807 : * Given that ExecMergeMatched() always makes progress by following the
2808 : * update chain and we never switch from ExecMergeNotMatched() to
2809 : * ExecMergeMatched(), there is no risk of a livelock.
2810 : */
2811 13992 : matched = tupleid != NULL || oldtuple != NULL;
2812 13992 : if (matched)
2813 11368 : rslot = ExecMergeMatched(context, resultRelInfo, tupleid, oldtuple,
2814 : canSetTag, &matched);
2815 :
2816 : /*
2817 : * Deal with the NOT MATCHED case (either a NOT MATCHED tuple from the
2818 : * join, or a previously MATCHED tuple for which ExecMergeMatched() set
2819 : * "matched" to false, indicating that it no longer matches).
2820 : */
2821 13902 : if (!matched)
2822 : {
2823 : /*
2824 : * If a concurrent update turned a MATCHED case into a NOT MATCHED
2825 : * case, and we have both WHEN NOT MATCHED BY SOURCE and WHEN NOT
2826 : * MATCHED [BY TARGET] actions, and there is a RETURNING clause,
2827 : * ExecMergeMatched() may have already executed a WHEN NOT MATCHED BY
2828 : * SOURCE action, and computed the row to return. If so, we cannot
2829 : * execute a WHEN NOT MATCHED [BY TARGET] action now, so mark it as
2830 : * pending (to be processed on the next call to ExecModifyTable()).
2831 : * Otherwise, just process the action now.
2832 : */
2833 2640 : if (rslot == NULL)
2834 2638 : rslot = ExecMergeNotMatched(context, resultRelInfo, canSetTag);
2835 : else
2836 2 : context->mtstate->mt_merge_pending_not_matched = context->planSlot;
2837 : }
2838 :
2839 13848 : return rslot;
2840 : }
2841 :
2842 : /*
2843 : * Check and execute the first qualifying MATCHED or NOT MATCHED BY SOURCE
2844 : * action, depending on whether the join quals are satisfied. If the target
2845 : * relation is a table, the current target tuple is identified by tupleid.
2846 : * Otherwise, if the target relation is a view, oldtuple is the current target
2847 : * tuple from the view.
2848 : *
2849 : * We start from the first WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action
2850 : * and check if the WHEN quals pass, if any. If the WHEN quals for the first
2851 : * action do not pass, we check the second, then the third and so on. If we
2852 : * reach the end without finding a qualifying action, we return NULL.
2853 : * Otherwise, we execute the qualifying action and return its RETURNING
2854 : * result, if any, or NULL.
2855 : *
2856 : * On entry, "*matched" is assumed to be true. If a concurrent update or
2857 : * delete is detected that causes the join quals to no longer pass, we set it
2858 : * to false, indicating that the caller should process any NOT MATCHED [BY
2859 : * TARGET] actions.
2860 : *
2861 : * After a concurrent update, we restart from the first action to look for a
2862 : * new qualifying action to execute. If the join quals originally passed, and
2863 : * the concurrent update caused them to no longer pass, then we switch from
2864 : * the MATCHED to the NOT MATCHED BY SOURCE list of actions before restarting
2865 : * (and setting "*matched" to false). As a result we may execute a WHEN NOT
2866 : * MATCHED BY SOURCE action, and set "*matched" to false, causing the caller
2867 : * to also execute a WHEN NOT MATCHED [BY TARGET] action.
2868 : */
2869 : static TupleTableSlot *
2870 11368 : ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2871 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag,
2872 : bool *matched)
2873 : {
2874 11368 : ModifyTableState *mtstate = context->mtstate;
2875 11368 : List **mergeActions = resultRelInfo->ri_MergeActions;
2876 : ItemPointerData lockedtid;
2877 : List *actionStates;
2878 11368 : TupleTableSlot *newslot = NULL;
2879 11368 : TupleTableSlot *rslot = NULL;
2880 11368 : EState *estate = context->estate;
2881 11368 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2882 : bool isNull;
2883 11368 : EPQState *epqstate = &mtstate->mt_epqstate;
2884 : ListCell *l;
2885 :
2886 : /* Expect matched to be true on entry */
2887 : Assert(*matched);
2888 :
2889 : /*
2890 : * If there are no WHEN MATCHED or WHEN NOT MATCHED BY SOURCE actions, we
2891 : * are done.
2892 : */
2893 11368 : if (mergeActions[MERGE_WHEN_MATCHED] == NIL &&
2894 1200 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] == NIL)
2895 528 : return NULL;
2896 :
2897 : /*
2898 : * Make tuple and any needed join variables available to ExecQual and
2899 : * ExecProject. The target's existing tuple is installed in the scantuple.
2900 : * This target relation's slot is required only in the case of a MATCHED
2901 : * or NOT MATCHED BY SOURCE tuple and UPDATE/DELETE actions.
2902 : */
2903 10840 : econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
2904 10840 : econtext->ecxt_innertuple = context->planSlot;
2905 10840 : econtext->ecxt_outertuple = NULL;
2906 :
2907 : /*
2908 : * This routine is only invoked for matched target rows, so we should
2909 : * either have the tupleid of the target row, or an old tuple from the
2910 : * target wholerow junk attr.
2911 : */
2912 : Assert(tupleid != NULL || oldtuple != NULL);
2913 10840 : ItemPointerSetInvalid(&lockedtid);
2914 10840 : if (oldtuple != NULL)
2915 : {
2916 : Assert(!resultRelInfo->ri_needLockTagTuple);
2917 96 : ExecForceStoreHeapTuple(oldtuple, resultRelInfo->ri_oldTupleSlot,
2918 : false);
2919 : }
2920 : else
2921 : {
2922 10744 : if (resultRelInfo->ri_needLockTagTuple)
2923 : {
2924 : /*
2925 : * This locks even for CMD_DELETE, for CMD_NOTHING, and for tuples
2926 : * that don't match mas_whenqual. MERGE on system catalogs is a
2927 : * minor use case, so don't bother optimizing those.
2928 : */
2929 6946 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
2930 : InplaceUpdateTupleLock);
2931 6946 : lockedtid = *tupleid;
2932 : }
2933 10744 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
2934 : tupleid,
2935 : SnapshotAny,
2936 : resultRelInfo->ri_oldTupleSlot))
2937 0 : elog(ERROR, "failed to fetch the target tuple");
2938 : }
2939 :
2940 : /*
2941 : * Test the join condition. If it's satisfied, perform a MATCHED action.
2942 : * Otherwise, perform a NOT MATCHED BY SOURCE action.
2943 : *
2944 : * Note that this join condition will be NULL if there are no NOT MATCHED
2945 : * BY SOURCE actions --- see transform_MERGE_to_join(). In that case, we
2946 : * need only consider MATCHED actions here.
2947 : */
2948 10840 : if (ExecQual(resultRelInfo->ri_MergeJoinCondition, econtext))
2949 10658 : actionStates = mergeActions[MERGE_WHEN_MATCHED];
2950 : else
2951 182 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
2952 :
2953 10840 : lmerge_matched:
2954 :
2955 19302 : foreach(l, actionStates)
2956 : {
2957 10968 : MergeActionState *relaction = (MergeActionState *) lfirst(l);
2958 10968 : CmdType commandType = relaction->mas_action->commandType;
2959 : TM_Result result;
2960 10968 : UpdateContext updateCxt = {0};
2961 :
2962 : /*
2963 : * Test condition, if any.
2964 : *
2965 : * In the absence of any condition, we perform the action
2966 : * unconditionally (no need to check separately since ExecQual() will
2967 : * return true if there are no conditions to evaluate).
2968 : */
2969 10968 : if (!ExecQual(relaction->mas_whenqual, econtext))
2970 8398 : continue;
2971 :
2972 : /*
2973 : * Check if the existing target tuple meets the USING checks of
2974 : * UPDATE/DELETE RLS policies. If those checks fail, we throw an
2975 : * error.
2976 : *
2977 : * The WITH CHECK quals for UPDATE RLS policies are applied in
2978 : * ExecUpdateAct() and hence we need not do anything special to handle
2979 : * them.
2980 : *
2981 : * NOTE: We must do this after WHEN quals are evaluated, so that we
2982 : * check policies only when they matter.
2983 : */
2984 2570 : if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
2985 : {
2986 90 : ExecWithCheckOptions(commandType == CMD_UPDATE ?
2987 : WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
2988 : resultRelInfo,
2989 : resultRelInfo->ri_oldTupleSlot,
2990 90 : context->mtstate->ps.state);
2991 : }
2992 :
2993 : /* Perform stated action */
2994 2546 : switch (commandType)
2995 : {
2996 2128 : case CMD_UPDATE:
2997 :
2998 : /*
2999 : * Project the output tuple, and use that to update the table.
3000 : * We don't need to filter out junk attributes, because the
3001 : * UPDATE action's targetlist doesn't have any.
3002 : */
3003 2128 : newslot = ExecProject(relaction->mas_proj);
3004 :
3005 2128 : mtstate->mt_merge_action = relaction;
3006 2128 : if (!ExecUpdatePrologue(context, resultRelInfo,
3007 : tupleid, NULL, newslot, &result))
3008 : {
3009 18 : if (result == TM_Ok)
3010 156 : goto out; /* "do nothing" */
3011 :
3012 12 : break; /* concurrent update/delete */
3013 : }
3014 :
3015 : /* INSTEAD OF ROW UPDATE Triggers */
3016 2110 : if (resultRelInfo->ri_TrigDesc &&
3017 334 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
3018 : {
3019 78 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
3020 : oldtuple, newslot))
3021 0 : goto out; /* "do nothing" */
3022 : }
3023 : else
3024 : {
3025 : /* checked ri_needLockTagTuple above */
3026 : Assert(oldtuple == NULL);
3027 :
3028 2032 : result = ExecUpdateAct(context, resultRelInfo, tupleid,
3029 : NULL, newslot, canSetTag,
3030 : &updateCxt);
3031 :
3032 : /*
3033 : * As in ExecUpdate(), if ExecUpdateAct() reports that a
3034 : * cross-partition update was done, then there's nothing
3035 : * else for us to do --- the UPDATE has been turned into a
3036 : * DELETE and an INSERT, and we must not perform any of
3037 : * the usual post-update tasks. Also, the RETURNING tuple
3038 : * (if any) has been projected, so we can just return
3039 : * that.
3040 : */
3041 2012 : if (updateCxt.crossPartUpdate)
3042 : {
3043 134 : mtstate->mt_merge_updated += 1;
3044 134 : rslot = context->cpUpdateReturningSlot;
3045 134 : goto out;
3046 : }
3047 : }
3048 :
3049 1956 : if (result == TM_Ok)
3050 : {
3051 1884 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
3052 : tupleid, NULL, newslot);
3053 1872 : mtstate->mt_merge_updated += 1;
3054 : }
3055 1944 : break;
3056 :
3057 388 : case CMD_DELETE:
3058 388 : mtstate->mt_merge_action = relaction;
3059 388 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
3060 : NULL, NULL, &result))
3061 : {
3062 12 : if (result == TM_Ok)
3063 6 : goto out; /* "do nothing" */
3064 :
3065 6 : break; /* concurrent update/delete */
3066 : }
3067 :
3068 : /* INSTEAD OF ROW DELETE Triggers */
3069 376 : if (resultRelInfo->ri_TrigDesc &&
3070 44 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
3071 : {
3072 6 : if (!ExecIRDeleteTriggers(estate, resultRelInfo,
3073 : oldtuple))
3074 0 : goto out; /* "do nothing" */
3075 : }
3076 : else
3077 : {
3078 : /* checked ri_needLockTagTuple above */
3079 : Assert(oldtuple == NULL);
3080 :
3081 370 : result = ExecDeleteAct(context, resultRelInfo, tupleid,
3082 : false);
3083 : }
3084 :
3085 376 : if (result == TM_Ok)
3086 : {
3087 358 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
3088 : false);
3089 358 : mtstate->mt_merge_deleted += 1;
3090 : }
3091 376 : break;
3092 :
3093 30 : case CMD_NOTHING:
3094 : /* Doing nothing is always OK */
3095 30 : result = TM_Ok;
3096 30 : break;
3097 :
3098 0 : default:
3099 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3100 : }
3101 :
3102 2368 : switch (result)
3103 : {
3104 2260 : case TM_Ok:
3105 : /* all good; perform final actions */
3106 2260 : if (canSetTag && commandType != CMD_NOTHING)
3107 2212 : (estate->es_processed)++;
3108 :
3109 2260 : break;
3110 :
3111 32 : case TM_SelfModified:
3112 :
3113 : /*
3114 : * The target tuple was already updated or deleted by the
3115 : * current command, or by a later command in the current
3116 : * transaction. The former case is explicitly disallowed by
3117 : * the SQL standard for MERGE, which insists that the MERGE
3118 : * join condition should not join a target row to more than
3119 : * one source row.
3120 : *
3121 : * The latter case arises if the tuple is modified by a
3122 : * command in a BEFORE trigger, or perhaps by a command in a
3123 : * volatile function used in the query. In such situations we
3124 : * should not ignore the MERGE action, but it is equally
3125 : * unsafe to proceed. We don't want to discard the original
3126 : * MERGE action while keeping the triggered actions based on
3127 : * it; and it would be no better to allow the original MERGE
3128 : * action while discarding the updates that it triggered. So
3129 : * throwing an error is the only safe course.
3130 : */
3131 32 : if (context->tmfd.cmax != estate->es_output_cid)
3132 12 : ereport(ERROR,
3133 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3134 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3135 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3136 :
3137 20 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3138 20 : ereport(ERROR,
3139 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3140 : /* translator: %s is a SQL command name */
3141 : errmsg("%s command cannot affect row a second time",
3142 : "MERGE"),
3143 : errhint("Ensure that not more than one source row matches any one target row.")));
3144 :
3145 : /* This shouldn't happen */
3146 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3147 : break;
3148 :
3149 10 : case TM_Deleted:
3150 10 : if (IsolationUsesXactSnapshot())
3151 0 : ereport(ERROR,
3152 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3153 : errmsg("could not serialize access due to concurrent delete")));
3154 :
3155 : /*
3156 : * If the tuple was already deleted, set matched to false to
3157 : * let caller handle it under NOT MATCHED [BY TARGET] clauses.
3158 : */
3159 10 : *matched = false;
3160 10 : goto out;
3161 :
3162 66 : case TM_Updated:
3163 : {
3164 : bool was_matched;
3165 : Relation resultRelationDesc;
3166 : TupleTableSlot *epqslot,
3167 : *inputslot;
3168 : LockTupleMode lockmode;
3169 :
3170 : /*
3171 : * The target tuple was concurrently updated by some other
3172 : * transaction. If we are currently processing a MATCHED
3173 : * action, use EvalPlanQual() with the new version of the
3174 : * tuple and recheck the join qual, to detect a change
3175 : * from the MATCHED to the NOT MATCHED cases. If we are
3176 : * already processing a NOT MATCHED BY SOURCE action, we
3177 : * skip this (cannot switch from NOT MATCHED BY SOURCE to
3178 : * MATCHED).
3179 : */
3180 66 : was_matched = relaction->mas_action->matchKind == MERGE_WHEN_MATCHED;
3181 66 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
3182 66 : lockmode = ExecUpdateLockMode(estate, resultRelInfo);
3183 :
3184 66 : if (was_matched)
3185 66 : inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
3186 : resultRelInfo->ri_RangeTableIndex);
3187 : else
3188 0 : inputslot = resultRelInfo->ri_oldTupleSlot;
3189 :
3190 66 : result = table_tuple_lock(resultRelationDesc, tupleid,
3191 : estate->es_snapshot,
3192 : inputslot, estate->es_output_cid,
3193 : lockmode, LockWaitBlock,
3194 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
3195 : &context->tmfd);
3196 66 : switch (result)
3197 : {
3198 64 : case TM_Ok:
3199 :
3200 : /*
3201 : * If the tuple was updated and migrated to
3202 : * another partition concurrently, the current
3203 : * MERGE implementation can't follow. There's
3204 : * probably a better way to handle this case, but
3205 : * it'd require recognizing the relation to which
3206 : * the tuple moved, and setting our current
3207 : * resultRelInfo to that.
3208 : */
3209 64 : if (ItemPointerIndicatesMovedPartitions(&context->tmfd.ctid))
3210 0 : ereport(ERROR,
3211 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3212 : errmsg("tuple to be merged was already moved to another partition due to concurrent update")));
3213 :
3214 : /*
3215 : * If this was a MATCHED case, use EvalPlanQual()
3216 : * to recheck the join condition.
3217 : */
3218 64 : if (was_matched)
3219 : {
3220 64 : epqslot = EvalPlanQual(epqstate,
3221 : resultRelationDesc,
3222 : resultRelInfo->ri_RangeTableIndex,
3223 : inputslot);
3224 :
3225 : /*
3226 : * If the subplan didn't return a tuple, then
3227 : * we must be dealing with an inner join for
3228 : * which the join condition no longer matches.
3229 : * This can only happen if there are no NOT
3230 : * MATCHED actions, and so there is nothing
3231 : * more to do.
3232 : */
3233 64 : if (TupIsNull(epqslot))
3234 0 : goto out;
3235 :
3236 : /*
3237 : * If we got a NULL ctid from the subplan, the
3238 : * join quals no longer pass and we switch to
3239 : * the NOT MATCHED BY SOURCE case.
3240 : */
3241 64 : (void) ExecGetJunkAttribute(epqslot,
3242 64 : resultRelInfo->ri_RowIdAttNo,
3243 : &isNull);
3244 64 : if (isNull)
3245 4 : *matched = false;
3246 :
3247 : /*
3248 : * Otherwise, recheck the join quals to see if
3249 : * we need to switch to the NOT MATCHED BY
3250 : * SOURCE case.
3251 : */
3252 64 : if (resultRelInfo->ri_needLockTagTuple)
3253 : {
3254 2 : if (ItemPointerIsValid(&lockedtid))
3255 2 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3256 : InplaceUpdateTupleLock);
3257 2 : LockTuple(resultRelInfo->ri_RelationDesc, &context->tmfd.ctid,
3258 : InplaceUpdateTupleLock);
3259 2 : lockedtid = context->tmfd.ctid;
3260 : }
3261 64 : if (!table_tuple_fetch_row_version(resultRelationDesc,
3262 : &context->tmfd.ctid,
3263 : SnapshotAny,
3264 : resultRelInfo->ri_oldTupleSlot))
3265 0 : elog(ERROR, "failed to fetch the target tuple");
3266 :
3267 64 : if (*matched)
3268 60 : *matched = ExecQual(resultRelInfo->ri_MergeJoinCondition,
3269 : econtext);
3270 :
3271 : /* Switch lists, if necessary */
3272 64 : if (!*matched)
3273 6 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3274 : }
3275 :
3276 : /*
3277 : * Loop back and process the MATCHED or NOT
3278 : * MATCHED BY SOURCE actions from the start.
3279 : */
3280 64 : goto lmerge_matched;
3281 :
3282 0 : case TM_Deleted:
3283 :
3284 : /*
3285 : * tuple already deleted; tell caller to run NOT
3286 : * MATCHED [BY TARGET] actions
3287 : */
3288 0 : *matched = false;
3289 0 : goto out;
3290 :
3291 2 : case TM_SelfModified:
3292 :
3293 : /*
3294 : * This can be reached when following an update
3295 : * chain from a tuple updated by another session,
3296 : * reaching a tuple that was already updated or
3297 : * deleted by the current command, or by a later
3298 : * command in the current transaction. As above,
3299 : * this should always be treated as an error.
3300 : */
3301 2 : if (context->tmfd.cmax != estate->es_output_cid)
3302 0 : ereport(ERROR,
3303 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3304 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3305 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3306 :
3307 2 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3308 2 : ereport(ERROR,
3309 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3310 : /* translator: %s is a SQL command name */
3311 : errmsg("%s command cannot affect row a second time",
3312 : "MERGE"),
3313 : errhint("Ensure that not more than one source row matches any one target row.")));
3314 :
3315 : /* This shouldn't happen */
3316 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3317 : goto out;
3318 :
3319 0 : default:
3320 : /* see table_tuple_lock call in ExecDelete() */
3321 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
3322 : result);
3323 : goto out;
3324 : }
3325 : }
3326 :
3327 0 : case TM_Invisible:
3328 : case TM_WouldBlock:
3329 : case TM_BeingModified:
3330 : /* these should not occur */
3331 0 : elog(ERROR, "unexpected tuple operation result: %d", result);
3332 : break;
3333 : }
3334 :
3335 : /* Process RETURNING if present */
3336 2260 : if (resultRelInfo->ri_projectReturning)
3337 : {
3338 270 : switch (commandType)
3339 : {
3340 162 : case CMD_UPDATE:
3341 162 : rslot = ExecProcessReturning(resultRelInfo, newslot,
3342 : context->planSlot);
3343 162 : break;
3344 :
3345 108 : case CMD_DELETE:
3346 108 : rslot = ExecProcessReturning(resultRelInfo,
3347 : resultRelInfo->ri_oldTupleSlot,
3348 : context->planSlot);
3349 108 : break;
3350 :
3351 0 : case CMD_NOTHING:
3352 0 : break;
3353 :
3354 0 : default:
3355 0 : elog(ERROR, "unrecognized commandType: %d",
3356 : (int) commandType);
3357 : }
3358 : }
3359 :
3360 : /*
3361 : * We've activated one of the WHEN clauses, so we don't search
3362 : * further. This is required behaviour, not an optimization.
3363 : */
3364 2260 : break;
3365 : }
3366 :
3367 : /*
3368 : * Successfully executed an action or no qualifying action was found.
3369 : */
3370 10750 : out:
3371 10750 : if (ItemPointerIsValid(&lockedtid))
3372 6946 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3373 : InplaceUpdateTupleLock);
3374 10750 : return rslot;
3375 : }
3376 :
3377 : /*
3378 : * Execute the first qualifying NOT MATCHED [BY TARGET] action.
3379 : */
3380 : static TupleTableSlot *
3381 2640 : ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3382 : bool canSetTag)
3383 : {
3384 2640 : ModifyTableState *mtstate = context->mtstate;
3385 2640 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3386 : List *actionStates;
3387 2640 : TupleTableSlot *rslot = NULL;
3388 : ListCell *l;
3389 :
3390 : /*
3391 : * For INSERT actions, the root relation's merge action is OK since the
3392 : * INSERT's targetlist and the WHEN conditions can only refer to the
3393 : * source relation and hence it does not matter which result relation we
3394 : * work with.
3395 : *
3396 : * XXX does this mean that we can avoid creating copies of actionStates on
3397 : * partitioned tables, for not-matched actions?
3398 : */
3399 2640 : actionStates = resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET];
3400 :
3401 : /*
3402 : * Make source tuple available to ExecQual and ExecProject. We don't need
3403 : * the target tuple, since the WHEN quals and targetlist can't refer to
3404 : * the target columns.
3405 : */
3406 2640 : econtext->ecxt_scantuple = NULL;
3407 2640 : econtext->ecxt_innertuple = context->planSlot;
3408 2640 : econtext->ecxt_outertuple = NULL;
3409 :
3410 3510 : foreach(l, actionStates)
3411 : {
3412 2640 : MergeActionState *action = (MergeActionState *) lfirst(l);
3413 2640 : CmdType commandType = action->mas_action->commandType;
3414 : TupleTableSlot *newslot;
3415 :
3416 : /*
3417 : * Test condition, if any.
3418 : *
3419 : * In the absence of any condition, we perform the action
3420 : * unconditionally (no need to check separately since ExecQual() will
3421 : * return true if there are no conditions to evaluate).
3422 : */
3423 2640 : if (!ExecQual(action->mas_whenqual, econtext))
3424 870 : continue;
3425 :
3426 : /* Perform stated action */
3427 1770 : switch (commandType)
3428 : {
3429 1770 : case CMD_INSERT:
3430 :
3431 : /*
3432 : * Project the tuple. In case of a partitioned table, the
3433 : * projection was already built to use the root's descriptor,
3434 : * so we don't need to map the tuple here.
3435 : */
3436 1770 : newslot = ExecProject(action->mas_proj);
3437 1770 : mtstate->mt_merge_action = action;
3438 :
3439 1770 : rslot = ExecInsert(context, mtstate->rootResultRelInfo,
3440 : newslot, canSetTag, NULL, NULL);
3441 1716 : mtstate->mt_merge_inserted += 1;
3442 1716 : break;
3443 0 : case CMD_NOTHING:
3444 : /* Do nothing */
3445 0 : break;
3446 0 : default:
3447 0 : elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
3448 : }
3449 :
3450 : /*
3451 : * We've activated one of the WHEN clauses, so we don't search
3452 : * further. This is required behaviour, not an optimization.
3453 : */
3454 1716 : break;
3455 : }
3456 :
3457 2586 : return rslot;
3458 : }
3459 :
3460 : /*
3461 : * Initialize state for execution of MERGE.
3462 : */
3463 : void
3464 1432 : ExecInitMerge(ModifyTableState *mtstate, EState *estate)
3465 : {
3466 1432 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
3467 1432 : ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
3468 : ResultRelInfo *resultRelInfo;
3469 : ExprContext *econtext;
3470 : ListCell *lc;
3471 : int i;
3472 :
3473 1432 : if (node->mergeActionLists == NIL)
3474 0 : return;
3475 :
3476 1432 : mtstate->mt_merge_subcommands = 0;
3477 :
3478 1432 : if (mtstate->ps.ps_ExprContext == NULL)
3479 1254 : ExecAssignExprContext(estate, &mtstate->ps);
3480 1432 : econtext = mtstate->ps.ps_ExprContext;
3481 :
3482 : /*
3483 : * Create a MergeActionState for each action on the mergeActionList and
3484 : * add it to either a list of matched actions or not-matched actions.
3485 : *
3486 : * Similar logic appears in ExecInitPartitionInfo(), so if changing
3487 : * anything here, do so there too.
3488 : */
3489 1432 : i = 0;
3490 3096 : foreach(lc, node->mergeActionLists)
3491 : {
3492 1664 : List *mergeActionList = lfirst(lc);
3493 : Node *joinCondition;
3494 : TupleDesc relationDesc;
3495 : ListCell *l;
3496 :
3497 1664 : joinCondition = (Node *) list_nth(node->mergeJoinConditions, i);
3498 1664 : resultRelInfo = mtstate->resultRelInfo + i;
3499 1664 : i++;
3500 1664 : relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3501 :
3502 : /* initialize slots for MERGE fetches from this rel */
3503 1664 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3504 1664 : ExecInitMergeTupleSlots(mtstate, resultRelInfo);
3505 :
3506 : /* initialize state for join condition checking */
3507 1664 : resultRelInfo->ri_MergeJoinCondition =
3508 1664 : ExecInitQual((List *) joinCondition, &mtstate->ps);
3509 :
3510 4640 : foreach(l, mergeActionList)
3511 : {
3512 2976 : MergeAction *action = (MergeAction *) lfirst(l);
3513 : MergeActionState *action_state;
3514 : TupleTableSlot *tgtslot;
3515 : TupleDesc tgtdesc;
3516 :
3517 : /*
3518 : * Build action merge state for this rel. (For partitions,
3519 : * equivalent code exists in ExecInitPartitionInfo.)
3520 : */
3521 2976 : action_state = makeNode(MergeActionState);
3522 2976 : action_state->mas_action = action;
3523 2976 : action_state->mas_whenqual = ExecInitQual((List *) action->qual,
3524 : &mtstate->ps);
3525 :
3526 : /*
3527 : * We create three lists - one for each MergeMatchKind - and stick
3528 : * the MergeActionState into the appropriate list.
3529 : */
3530 5952 : resultRelInfo->ri_MergeActions[action->matchKind] =
3531 2976 : lappend(resultRelInfo->ri_MergeActions[action->matchKind],
3532 : action_state);
3533 :
3534 2976 : switch (action->commandType)
3535 : {
3536 980 : case CMD_INSERT:
3537 980 : ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
3538 : action->targetList);
3539 :
3540 : /*
3541 : * If the MERGE targets a partitioned table, any INSERT
3542 : * actions must be routed through it, not the child
3543 : * relations. Initialize the routing struct and the root
3544 : * table's "new" tuple slot for that, if not already done.
3545 : * The projection we prepare, for all relations, uses the
3546 : * root relation descriptor, and targets the plan's root
3547 : * slot. (This is consistent with the fact that we
3548 : * checked the plan output to match the root relation,
3549 : * above.)
3550 : */
3551 980 : if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
3552 : RELKIND_PARTITIONED_TABLE)
3553 : {
3554 298 : if (mtstate->mt_partition_tuple_routing == NULL)
3555 : {
3556 : /*
3557 : * Initialize planstate for routing if not already
3558 : * done.
3559 : *
3560 : * Note that the slot is managed as a standalone
3561 : * slot belonging to ModifyTableState, so we pass
3562 : * NULL for the 2nd argument.
3563 : */
3564 124 : mtstate->mt_root_tuple_slot =
3565 124 : table_slot_create(rootRelInfo->ri_RelationDesc,
3566 : NULL);
3567 124 : mtstate->mt_partition_tuple_routing =
3568 124 : ExecSetupPartitionTupleRouting(estate,
3569 : rootRelInfo->ri_RelationDesc);
3570 : }
3571 298 : tgtslot = mtstate->mt_root_tuple_slot;
3572 298 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
3573 : }
3574 : else
3575 : {
3576 : /* not partitioned? use the stock relation and slot */
3577 682 : tgtslot = resultRelInfo->ri_newTupleSlot;
3578 682 : tgtdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3579 : }
3580 :
3581 980 : action_state->mas_proj =
3582 980 : ExecBuildProjectionInfo(action->targetList, econtext,
3583 : tgtslot,
3584 : &mtstate->ps,
3585 : tgtdesc);
3586 :
3587 980 : mtstate->mt_merge_subcommands |= MERGE_INSERT;
3588 980 : break;
3589 1522 : case CMD_UPDATE:
3590 1522 : action_state->mas_proj =
3591 1522 : ExecBuildUpdateProjection(action->targetList,
3592 : true,
3593 : action->updateColnos,
3594 : relationDesc,
3595 : econtext,
3596 : resultRelInfo->ri_newTupleSlot,
3597 : &mtstate->ps);
3598 1522 : mtstate->mt_merge_subcommands |= MERGE_UPDATE;
3599 1522 : break;
3600 416 : case CMD_DELETE:
3601 416 : mtstate->mt_merge_subcommands |= MERGE_DELETE;
3602 416 : break;
3603 58 : case CMD_NOTHING:
3604 58 : break;
3605 0 : default:
3606 0 : elog(ERROR, "unknown operation");
3607 : break;
3608 : }
3609 : }
3610 : }
3611 : }
3612 :
3613 : /*
3614 : * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
3615 : *
3616 : * We mark 'projectNewInfoValid' even though the projections themselves
3617 : * are not initialized here.
3618 : */
3619 : void
3620 1682 : ExecInitMergeTupleSlots(ModifyTableState *mtstate,
3621 : ResultRelInfo *resultRelInfo)
3622 : {
3623 1682 : EState *estate = mtstate->ps.state;
3624 :
3625 : Assert(!resultRelInfo->ri_projectNewInfoValid);
3626 :
3627 1682 : resultRelInfo->ri_oldTupleSlot =
3628 1682 : table_slot_create(resultRelInfo->ri_RelationDesc,
3629 : &estate->es_tupleTable);
3630 1682 : resultRelInfo->ri_newTupleSlot =
3631 1682 : table_slot_create(resultRelInfo->ri_RelationDesc,
3632 : &estate->es_tupleTable);
3633 1682 : resultRelInfo->ri_projectNewInfoValid = true;
3634 1682 : }
3635 :
3636 : /*
3637 : * Process BEFORE EACH STATEMENT triggers
3638 : */
3639 : static void
3640 118106 : fireBSTriggers(ModifyTableState *node)
3641 : {
3642 118106 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
3643 118106 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3644 :
3645 118106 : switch (node->operation)
3646 : {
3647 91950 : case CMD_INSERT:
3648 91950 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3649 91938 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3650 828 : ExecBSUpdateTriggers(node->ps.state,
3651 : resultRelInfo);
3652 91938 : break;
3653 12732 : case CMD_UPDATE:
3654 12732 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3655 12732 : break;
3656 12112 : case CMD_DELETE:
3657 12112 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3658 12112 : break;
3659 1312 : case CMD_MERGE:
3660 1312 : if (node->mt_merge_subcommands & MERGE_INSERT)
3661 722 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3662 1312 : if (node->mt_merge_subcommands & MERGE_UPDATE)
3663 930 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3664 1312 : if (node->mt_merge_subcommands & MERGE_DELETE)
3665 344 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3666 1312 : break;
3667 0 : default:
3668 0 : elog(ERROR, "unknown operation");
3669 : break;
3670 : }
3671 118094 : }
3672 :
3673 : /*
3674 : * Process AFTER EACH STATEMENT triggers
3675 : */
3676 : static void
3677 114940 : fireASTriggers(ModifyTableState *node)
3678 : {
3679 114940 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
3680 114940 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3681 :
3682 114940 : switch (node->operation)
3683 : {
3684 89740 : case CMD_INSERT:
3685 89740 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3686 726 : ExecASUpdateTriggers(node->ps.state,
3687 : resultRelInfo,
3688 726 : node->mt_oc_transition_capture);
3689 89740 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
3690 89740 : node->mt_transition_capture);
3691 89740 : break;
3692 12044 : case CMD_UPDATE:
3693 12044 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3694 12044 : node->mt_transition_capture);
3695 12044 : break;
3696 11988 : case CMD_DELETE:
3697 11988 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
3698 11988 : node->mt_transition_capture);
3699 11988 : break;
3700 1168 : case CMD_MERGE:
3701 1168 : if (node->mt_merge_subcommands & MERGE_DELETE)
3702 308 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
3703 308 : node->mt_transition_capture);
3704 1168 : if (node->mt_merge_subcommands & MERGE_UPDATE)
3705 834 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3706 834 : node->mt_transition_capture);
3707 1168 : if (node->mt_merge_subcommands & MERGE_INSERT)
3708 660 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
3709 660 : node->mt_transition_capture);
3710 1168 : break;
3711 0 : default:
3712 0 : elog(ERROR, "unknown operation");
3713 : break;
3714 : }
3715 114940 : }
3716 :
3717 : /*
3718 : * Set up the state needed for collecting transition tuples for AFTER
3719 : * triggers.
3720 : */
3721 : static void
3722 118404 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
3723 : {
3724 118404 : ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
3725 118404 : ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
3726 :
3727 : /* Check for transition tables on the directly targeted relation. */
3728 118404 : mtstate->mt_transition_capture =
3729 118404 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3730 118404 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3731 : mtstate->operation);
3732 118404 : if (plan->operation == CMD_INSERT &&
3733 91952 : plan->onConflictAction == ONCONFLICT_UPDATE)
3734 828 : mtstate->mt_oc_transition_capture =
3735 828 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3736 828 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3737 : CMD_UPDATE);
3738 118404 : }
3739 :
3740 : /*
3741 : * ExecPrepareTupleRouting --- prepare for routing one tuple
3742 : *
3743 : * Determine the partition in which the tuple in slot is to be inserted,
3744 : * and return its ResultRelInfo in *partRelInfo. The return value is
3745 : * a slot holding the tuple of the partition rowtype.
3746 : *
3747 : * This also sets the transition table information in mtstate based on the
3748 : * selected partition.
3749 : */
3750 : static TupleTableSlot *
3751 722156 : ExecPrepareTupleRouting(ModifyTableState *mtstate,
3752 : EState *estate,
3753 : PartitionTupleRouting *proute,
3754 : ResultRelInfo *targetRelInfo,
3755 : TupleTableSlot *slot,
3756 : ResultRelInfo **partRelInfo)
3757 : {
3758 : ResultRelInfo *partrel;
3759 : TupleConversionMap *map;
3760 :
3761 : /*
3762 : * Lookup the target partition's ResultRelInfo. If ExecFindPartition does
3763 : * not find a valid partition for the tuple in 'slot' then an error is
3764 : * raised. An error may also be raised if the found partition is not a
3765 : * valid target for INSERTs. This is required since a partitioned table
3766 : * UPDATE to another partition becomes a DELETE+INSERT.
3767 : */
3768 722156 : partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
3769 :
3770 : /*
3771 : * If we're capturing transition tuples, we might need to convert from the
3772 : * partition rowtype to root partitioned table's rowtype. But if there
3773 : * are no BEFORE triggers on the partition that could change the tuple, we
3774 : * can just remember the original unconverted tuple to avoid a needless
3775 : * round trip conversion.
3776 : */
3777 721952 : if (mtstate->mt_transition_capture != NULL)
3778 : {
3779 : bool has_before_insert_row_trig;
3780 :
3781 168 : has_before_insert_row_trig = (partrel->ri_TrigDesc &&
3782 42 : partrel->ri_TrigDesc->trig_insert_before_row);
3783 :
3784 126 : mtstate->mt_transition_capture->tcs_original_insert_tuple =
3785 126 : !has_before_insert_row_trig ? slot : NULL;
3786 : }
3787 :
3788 : /*
3789 : * Convert the tuple, if necessary.
3790 : */
3791 721952 : map = ExecGetRootToChildMap(partrel, estate);
3792 721952 : if (map != NULL)
3793 : {
3794 68412 : TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
3795 :
3796 68412 : slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
3797 : }
3798 :
3799 721952 : *partRelInfo = partrel;
3800 721952 : return slot;
3801 : }
3802 :
3803 : /* ----------------------------------------------------------------
3804 : * ExecModifyTable
3805 : *
3806 : * Perform table modifications as required, and return RETURNING results
3807 : * if needed.
3808 : * ----------------------------------------------------------------
3809 : */
3810 : static TupleTableSlot *
3811 126340 : ExecModifyTable(PlanState *pstate)
3812 : {
3813 126340 : ModifyTableState *node = castNode(ModifyTableState, pstate);
3814 : ModifyTableContext context;
3815 126340 : EState *estate = node->ps.state;
3816 126340 : CmdType operation = node->operation;
3817 : ResultRelInfo *resultRelInfo;
3818 : PlanState *subplanstate;
3819 : TupleTableSlot *slot;
3820 : TupleTableSlot *oldSlot;
3821 : ItemPointerData tuple_ctid;
3822 : HeapTupleData oldtupdata;
3823 : HeapTuple oldtuple;
3824 : ItemPointer tupleid;
3825 : bool tuplock;
3826 :
3827 126340 : CHECK_FOR_INTERRUPTS();
3828 :
3829 : /*
3830 : * This should NOT get called during EvalPlanQual; we should have passed a
3831 : * subplan tree to EvalPlanQual, instead. Use a runtime test not just
3832 : * Assert because this condition is easy to miss in testing. (Note:
3833 : * although ModifyTable should not get executed within an EvalPlanQual
3834 : * operation, we do have to allow it to be initialized and shut down in
3835 : * case it is within a CTE subplan. Hence this test must be here, not in
3836 : * ExecInitModifyTable.)
3837 : */
3838 126340 : if (estate->es_epq_active != NULL)
3839 0 : elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
3840 :
3841 : /*
3842 : * If we've already completed processing, don't try to do more. We need
3843 : * this test because ExecPostprocessPlan might call us an extra time, and
3844 : * our subplan's nodes aren't necessarily robust against being called
3845 : * extra times.
3846 : */
3847 126340 : if (node->mt_done)
3848 782 : return NULL;
3849 :
3850 : /*
3851 : * On first call, fire BEFORE STATEMENT triggers before proceeding.
3852 : */
3853 125558 : if (node->fireBSTriggers)
3854 : {
3855 118106 : fireBSTriggers(node);
3856 118094 : node->fireBSTriggers = false;
3857 : }
3858 :
3859 : /* Preload local variables */
3860 125546 : resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
3861 125546 : subplanstate = outerPlanState(node);
3862 :
3863 : /* Set global context */
3864 125546 : context.mtstate = node;
3865 125546 : context.epqstate = &node->mt_epqstate;
3866 125546 : context.estate = estate;
3867 :
3868 : /*
3869 : * Fetch rows from subplan, and execute the required table modification
3870 : * for each row.
3871 : */
3872 : for (;;)
3873 : {
3874 : /*
3875 : * Reset the per-output-tuple exprcontext. This is needed because
3876 : * triggers expect to use that context as workspace. It's a bit ugly
3877 : * to do this below the top level of the plan, however. We might need
3878 : * to rethink this later.
3879 : */
3880 13473388 : ResetPerTupleExprContext(estate);
3881 :
3882 : /*
3883 : * Reset per-tuple memory context used for processing on conflict and
3884 : * returning clauses, to free any expression evaluation storage
3885 : * allocated in the previous cycle.
3886 : */
3887 13473388 : if (pstate->ps_ExprContext)
3888 341752 : ResetExprContext(pstate->ps_ExprContext);
3889 :
3890 : /*
3891 : * If there is a pending MERGE ... WHEN NOT MATCHED [BY TARGET] action
3892 : * to execute, do so now --- see the comments in ExecMerge().
3893 : */
3894 13473388 : if (node->mt_merge_pending_not_matched != NULL)
3895 : {
3896 2 : context.planSlot = node->mt_merge_pending_not_matched;
3897 :
3898 2 : slot = ExecMergeNotMatched(&context, node->resultRelInfo,
3899 2 : node->canSetTag);
3900 :
3901 : /* Clear the pending action */
3902 2 : node->mt_merge_pending_not_matched = NULL;
3903 :
3904 : /*
3905 : * If we got a RETURNING result, return it to the caller. We'll
3906 : * continue the work on next call.
3907 : */
3908 2 : if (slot)
3909 2 : return slot;
3910 :
3911 0 : continue; /* continue with the next tuple */
3912 : }
3913 :
3914 : /* Fetch the next row from subplan */
3915 13473386 : context.planSlot = ExecProcNode(subplanstate);
3916 :
3917 : /* No more tuples to process? */
3918 13472986 : if (TupIsNull(context.planSlot))
3919 : break;
3920 :
3921 : /*
3922 : * When there are multiple result relations, each tuple contains a
3923 : * junk column that gives the OID of the rel from which it came.
3924 : * Extract it and select the correct result relation.
3925 : */
3926 13358046 : if (AttributeNumberIsValid(node->mt_resultOidAttno))
3927 : {
3928 : Datum datum;
3929 : bool isNull;
3930 : Oid resultoid;
3931 :
3932 4944 : datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
3933 : &isNull);
3934 4944 : if (isNull)
3935 : {
3936 : /*
3937 : * For commands other than MERGE, any tuples having InvalidOid
3938 : * for tableoid are errors. For MERGE, we may need to handle
3939 : * them as WHEN NOT MATCHED clauses if any, so do that.
3940 : *
3941 : * Note that we use the node's toplevel resultRelInfo, not any
3942 : * specific partition's.
3943 : */
3944 466 : if (operation == CMD_MERGE)
3945 : {
3946 466 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
3947 :
3948 466 : slot = ExecMerge(&context, node->resultRelInfo,
3949 466 : NULL, NULL, node->canSetTag);
3950 :
3951 : /*
3952 : * If we got a RETURNING result, return it to the caller.
3953 : * We'll continue the work on next call.
3954 : */
3955 460 : if (slot)
3956 20 : return slot;
3957 :
3958 440 : continue; /* continue with the next tuple */
3959 : }
3960 :
3961 0 : elog(ERROR, "tableoid is NULL");
3962 : }
3963 4478 : resultoid = DatumGetObjectId(datum);
3964 :
3965 : /* If it's not the same as last time, we need to locate the rel */
3966 4478 : if (resultoid != node->mt_lastResultOid)
3967 3046 : resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
3968 : false, true);
3969 : }
3970 :
3971 : /*
3972 : * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
3973 : * here is compute the RETURNING expressions.
3974 : */
3975 13357580 : if (resultRelInfo->ri_usesFdwDirectModify)
3976 : {
3977 : Assert(resultRelInfo->ri_projectReturning);
3978 :
3979 : /*
3980 : * A scan slot containing the data that was actually inserted,
3981 : * updated or deleted has already been made available to
3982 : * ExecProcessReturning by IterateDirectModify, so no need to
3983 : * provide it here.
3984 : */
3985 694 : slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot);
3986 :
3987 694 : return slot;
3988 : }
3989 :
3990 13356886 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
3991 13356886 : slot = context.planSlot;
3992 :
3993 13356886 : tupleid = NULL;
3994 13356886 : oldtuple = NULL;
3995 :
3996 : /*
3997 : * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
3998 : * to be updated/deleted/merged. For a heap relation, that's a TID;
3999 : * otherwise we may have a wholerow junk attr that carries the old
4000 : * tuple in toto. Keep this in step with the part of
4001 : * ExecInitModifyTable that sets up ri_RowIdAttNo.
4002 : */
4003 13356886 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4004 : operation == CMD_MERGE)
4005 : {
4006 : char relkind;
4007 : Datum datum;
4008 : bool isNull;
4009 :
4010 1960856 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4011 1960856 : if (relkind == RELKIND_RELATION ||
4012 502 : relkind == RELKIND_MATVIEW ||
4013 : relkind == RELKIND_PARTITIONED_TABLE)
4014 : {
4015 : /* ri_RowIdAttNo refers to a ctid attribute */
4016 : Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo));
4017 1960360 : datum = ExecGetJunkAttribute(slot,
4018 1960360 : resultRelInfo->ri_RowIdAttNo,
4019 : &isNull);
4020 :
4021 : /*
4022 : * For commands other than MERGE, any tuples having a null row
4023 : * identifier are errors. For MERGE, we may need to handle
4024 : * them as WHEN NOT MATCHED clauses if any, so do that.
4025 : *
4026 : * Note that we use the node's toplevel resultRelInfo, not any
4027 : * specific partition's.
4028 : */
4029 1960360 : if (isNull)
4030 : {
4031 2110 : if (operation == CMD_MERGE)
4032 : {
4033 2110 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4034 :
4035 2110 : slot = ExecMerge(&context, node->resultRelInfo,
4036 2110 : NULL, NULL, node->canSetTag);
4037 :
4038 : /*
4039 : * If we got a RETURNING result, return it to the
4040 : * caller. We'll continue the work on next call.
4041 : */
4042 2068 : if (slot)
4043 108 : return slot;
4044 :
4045 2002 : continue; /* continue with the next tuple */
4046 : }
4047 :
4048 0 : elog(ERROR, "ctid is NULL");
4049 : }
4050 :
4051 1958250 : tupleid = (ItemPointer) DatumGetPointer(datum);
4052 1958250 : tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
4053 1958250 : tupleid = &tuple_ctid;
4054 : }
4055 :
4056 : /*
4057 : * Use the wholerow attribute, when available, to reconstruct the
4058 : * old relation tuple. The old tuple serves one or both of two
4059 : * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
4060 : * provides values for any unchanged columns for the NEW tuple of
4061 : * an UPDATE, because the subplan does not produce all the columns
4062 : * of the target table.
4063 : *
4064 : * Note that the wholerow attribute does not carry system columns,
4065 : * so foreign table triggers miss seeing those, except that we
4066 : * know enough here to set t_tableOid. Quite separately from
4067 : * this, the FDW may fetch its own junk attrs to identify the row.
4068 : *
4069 : * Other relevant relkinds, currently limited to views, always
4070 : * have a wholerow attribute.
4071 : */
4072 496 : else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4073 : {
4074 478 : datum = ExecGetJunkAttribute(slot,
4075 478 : resultRelInfo->ri_RowIdAttNo,
4076 : &isNull);
4077 :
4078 : /*
4079 : * For commands other than MERGE, any tuples having a null row
4080 : * identifier are errors. For MERGE, we may need to handle
4081 : * them as WHEN NOT MATCHED clauses if any, so do that.
4082 : *
4083 : * Note that we use the node's toplevel resultRelInfo, not any
4084 : * specific partition's.
4085 : */
4086 478 : if (isNull)
4087 : {
4088 48 : if (operation == CMD_MERGE)
4089 : {
4090 48 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4091 :
4092 48 : slot = ExecMerge(&context, node->resultRelInfo,
4093 48 : NULL, NULL, node->canSetTag);
4094 :
4095 : /*
4096 : * If we got a RETURNING result, return it to the
4097 : * caller. We'll continue the work on next call.
4098 : */
4099 42 : if (slot)
4100 12 : return slot;
4101 :
4102 30 : continue; /* continue with the next tuple */
4103 : }
4104 :
4105 0 : elog(ERROR, "wholerow is NULL");
4106 : }
4107 :
4108 430 : oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
4109 430 : oldtupdata.t_len =
4110 430 : HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
4111 430 : ItemPointerSetInvalid(&(oldtupdata.t_self));
4112 : /* Historically, view triggers see invalid t_tableOid. */
4113 430 : oldtupdata.t_tableOid =
4114 430 : (relkind == RELKIND_VIEW) ? InvalidOid :
4115 166 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
4116 :
4117 430 : oldtuple = &oldtupdata;
4118 : }
4119 : else
4120 : {
4121 : /* Only foreign tables are allowed to omit a row-ID attr */
4122 : Assert(relkind == RELKIND_FOREIGN_TABLE);
4123 : }
4124 : }
4125 :
4126 13354728 : switch (operation)
4127 : {
4128 11396030 : case CMD_INSERT:
4129 : /* Initialize projection info if first time for this table */
4130 11396030 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4131 90816 : ExecInitInsertProjection(node, resultRelInfo);
4132 11396030 : slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
4133 11396030 : slot = ExecInsert(&context, resultRelInfo, slot,
4134 11396030 : node->canSetTag, NULL, NULL);
4135 11394012 : break;
4136 :
4137 308862 : case CMD_UPDATE:
4138 308862 : tuplock = false;
4139 :
4140 : /* Initialize projection info if first time for this table */
4141 308862 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4142 12472 : ExecInitUpdateProjection(node, resultRelInfo);
4143 :
4144 : /*
4145 : * Make the new tuple by combining plan's output tuple with
4146 : * the old tuple being updated.
4147 : */
4148 308862 : oldSlot = resultRelInfo->ri_oldTupleSlot;
4149 308862 : if (oldtuple != NULL)
4150 : {
4151 : Assert(!resultRelInfo->ri_needLockTagTuple);
4152 : /* Use the wholerow junk attr as the old tuple. */
4153 262 : ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
4154 : }
4155 : else
4156 : {
4157 : /* Fetch the most recent version of old tuple. */
4158 308600 : Relation relation = resultRelInfo->ri_RelationDesc;
4159 :
4160 308600 : if (resultRelInfo->ri_needLockTagTuple)
4161 : {
4162 16940 : LockTuple(relation, tupleid, InplaceUpdateTupleLock);
4163 16940 : tuplock = true;
4164 : }
4165 308600 : if (!table_tuple_fetch_row_version(relation, tupleid,
4166 : SnapshotAny,
4167 : oldSlot))
4168 0 : elog(ERROR, "failed to fetch tuple being updated");
4169 : }
4170 308862 : slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
4171 : oldSlot);
4172 :
4173 : /* Now apply the update. */
4174 308862 : slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
4175 308862 : slot, node->canSetTag);
4176 308364 : if (tuplock)
4177 16940 : UnlockTuple(resultRelInfo->ri_RelationDesc, tupleid,
4178 : InplaceUpdateTupleLock);
4179 308364 : break;
4180 :
4181 1638468 : case CMD_DELETE:
4182 1638468 : slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
4183 1638468 : true, false, node->canSetTag, NULL, NULL, NULL);
4184 1638386 : break;
4185 :
4186 11368 : case CMD_MERGE:
4187 11368 : slot = ExecMerge(&context, resultRelInfo, tupleid, oldtuple,
4188 11368 : node->canSetTag);
4189 11278 : break;
4190 :
4191 0 : default:
4192 0 : elog(ERROR, "unknown operation");
4193 : break;
4194 : }
4195 :
4196 : /*
4197 : * If we got a RETURNING result, return it to caller. We'll continue
4198 : * the work on next call.
4199 : */
4200 13352040 : if (slot)
4201 6640 : return slot;
4202 : }
4203 :
4204 : /*
4205 : * Insert remaining tuples for batch insert.
4206 : */
4207 114940 : if (estate->es_insert_pending_result_relations != NIL)
4208 24 : ExecPendingInserts(estate);
4209 :
4210 : /*
4211 : * We're done, but fire AFTER STATEMENT triggers before exiting.
4212 : */
4213 114940 : fireASTriggers(node);
4214 :
4215 114940 : node->mt_done = true;
4216 :
4217 114940 : return NULL;
4218 : }
4219 :
4220 : /*
4221 : * ExecLookupResultRelByOid
4222 : * If the table with given OID is among the result relations to be
4223 : * updated by the given ModifyTable node, return its ResultRelInfo.
4224 : *
4225 : * If not found, return NULL if missing_ok, else raise error.
4226 : *
4227 : * If update_cache is true, then upon successful lookup, update the node's
4228 : * one-element cache. ONLY ExecModifyTable may pass true for this.
4229 : */
4230 : ResultRelInfo *
4231 11672 : ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid,
4232 : bool missing_ok, bool update_cache)
4233 : {
4234 11672 : if (node->mt_resultOidHash)
4235 : {
4236 : /* Use the pre-built hash table to locate the rel */
4237 : MTTargetRelLookup *mtlookup;
4238 :
4239 : mtlookup = (MTTargetRelLookup *)
4240 0 : hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
4241 0 : if (mtlookup)
4242 : {
4243 0 : if (update_cache)
4244 : {
4245 0 : node->mt_lastResultOid = resultoid;
4246 0 : node->mt_lastResultIndex = mtlookup->relationIndex;
4247 : }
4248 0 : return node->resultRelInfo + mtlookup->relationIndex;
4249 : }
4250 : }
4251 : else
4252 : {
4253 : /* With few target rels, just search the ResultRelInfo array */
4254 22312 : for (int ndx = 0; ndx < node->mt_nrels; ndx++)
4255 : {
4256 14168 : ResultRelInfo *rInfo = node->resultRelInfo + ndx;
4257 :
4258 14168 : if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
4259 : {
4260 3528 : if (update_cache)
4261 : {
4262 3046 : node->mt_lastResultOid = resultoid;
4263 3046 : node->mt_lastResultIndex = ndx;
4264 : }
4265 3528 : return rInfo;
4266 : }
4267 : }
4268 : }
4269 :
4270 8144 : if (!missing_ok)
4271 0 : elog(ERROR, "incorrect result relation OID %u", resultoid);
4272 8144 : return NULL;
4273 : }
4274 :
4275 : /* ----------------------------------------------------------------
4276 : * ExecInitModifyTable
4277 : * ----------------------------------------------------------------
4278 : */
4279 : ModifyTableState *
4280 119288 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
4281 : {
4282 : ModifyTableState *mtstate;
4283 119288 : Plan *subplan = outerPlan(node);
4284 119288 : CmdType operation = node->operation;
4285 119288 : int nrels = list_length(node->resultRelations);
4286 : ResultRelInfo *resultRelInfo;
4287 : List *arowmarks;
4288 : ListCell *l;
4289 : int i;
4290 : Relation rel;
4291 :
4292 : /* check for unsupported flags */
4293 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
4294 :
4295 : /*
4296 : * create state structure
4297 : */
4298 119288 : mtstate = makeNode(ModifyTableState);
4299 119288 : mtstate->ps.plan = (Plan *) node;
4300 119288 : mtstate->ps.state = estate;
4301 119288 : mtstate->ps.ExecProcNode = ExecModifyTable;
4302 :
4303 119288 : mtstate->operation = operation;
4304 119288 : mtstate->canSetTag = node->canSetTag;
4305 119288 : mtstate->mt_done = false;
4306 :
4307 119288 : mtstate->mt_nrels = nrels;
4308 119288 : mtstate->resultRelInfo = (ResultRelInfo *)
4309 119288 : palloc(nrels * sizeof(ResultRelInfo));
4310 :
4311 119288 : mtstate->mt_merge_pending_not_matched = NULL;
4312 119288 : mtstate->mt_merge_inserted = 0;
4313 119288 : mtstate->mt_merge_updated = 0;
4314 119288 : mtstate->mt_merge_deleted = 0;
4315 :
4316 : /*----------
4317 : * Resolve the target relation. This is the same as:
4318 : *
4319 : * - the relation for which we will fire FOR STATEMENT triggers,
4320 : * - the relation into whose tuple format all captured transition tuples
4321 : * must be converted, and
4322 : * - the root partitioned table used for tuple routing.
4323 : *
4324 : * If it's a partitioned or inherited table, the root partition or
4325 : * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
4326 : * given explicitly in node->rootRelation. Otherwise, the target relation
4327 : * is the sole relation in the node->resultRelations list.
4328 : *----------
4329 : */
4330 119288 : if (node->rootRelation > 0)
4331 : {
4332 2600 : mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
4333 2600 : ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
4334 : node->rootRelation);
4335 : }
4336 : else
4337 : {
4338 : Assert(list_length(node->resultRelations) == 1);
4339 116688 : mtstate->rootResultRelInfo = mtstate->resultRelInfo;
4340 116688 : ExecInitResultRelation(estate, mtstate->resultRelInfo,
4341 116688 : linitial_int(node->resultRelations));
4342 : }
4343 :
4344 : /* set up epqstate with dummy subplan data for the moment */
4345 119288 : EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
4346 : node->epqParam, node->resultRelations);
4347 119288 : mtstate->fireBSTriggers = true;
4348 :
4349 : /*
4350 : * Build state for collecting transition tuples. This requires having a
4351 : * valid trigger query context, so skip it in explain-only mode.
4352 : */
4353 119288 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
4354 118404 : ExecSetupTransitionCaptureState(mtstate, estate);
4355 :
4356 : /*
4357 : * Open all the result relations and initialize the ResultRelInfo structs.
4358 : * (But root relation was initialized above, if it's part of the array.)
4359 : * We must do this before initializing the subplan, because direct-modify
4360 : * FDWs expect their ResultRelInfos to be available.
4361 : */
4362 119288 : resultRelInfo = mtstate->resultRelInfo;
4363 119288 : i = 0;
4364 240652 : foreach(l, node->resultRelations)
4365 : {
4366 121638 : Index resultRelation = lfirst_int(l);
4367 121638 : List *mergeActions = NIL;
4368 :
4369 121638 : if (node->mergeActionLists)
4370 1664 : mergeActions = list_nth(node->mergeActionLists, i);
4371 :
4372 121638 : if (resultRelInfo != mtstate->rootResultRelInfo)
4373 : {
4374 4950 : ExecInitResultRelation(estate, resultRelInfo, resultRelation);
4375 :
4376 : /*
4377 : * For child result relations, store the root result relation
4378 : * pointer. We do so for the convenience of places that want to
4379 : * look at the query's original target relation but don't have the
4380 : * mtstate handy.
4381 : */
4382 4950 : resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
4383 : }
4384 :
4385 : /* Initialize the usesFdwDirectModify flag */
4386 121638 : resultRelInfo->ri_usesFdwDirectModify =
4387 121638 : bms_is_member(i, node->fdwDirectModifyPlans);
4388 :
4389 : /*
4390 : * Verify result relation is a valid target for the current operation
4391 : */
4392 121638 : CheckValidResultRel(resultRelInfo, operation, mergeActions);
4393 :
4394 121364 : resultRelInfo++;
4395 121364 : i++;
4396 : }
4397 :
4398 : /*
4399 : * Now we may initialize the subplan.
4400 : */
4401 119014 : outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
4402 :
4403 : /*
4404 : * Do additional per-result-relation initialization.
4405 : */
4406 240344 : for (i = 0; i < nrels; i++)
4407 : {
4408 121330 : resultRelInfo = &mtstate->resultRelInfo[i];
4409 :
4410 : /* Let FDWs init themselves for foreign-table result rels */
4411 121330 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4412 121122 : resultRelInfo->ri_FdwRoutine != NULL &&
4413 310 : resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
4414 : {
4415 310 : List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
4416 :
4417 310 : resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
4418 : resultRelInfo,
4419 : fdw_private,
4420 : i,
4421 : eflags);
4422 : }
4423 :
4424 : /*
4425 : * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
4426 : * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
4427 : * tables, the FDW might have created additional junk attr(s), but
4428 : * those are no concern of ours.
4429 : */
4430 121330 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4431 : operation == CMD_MERGE)
4432 : {
4433 : char relkind;
4434 :
4435 29134 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4436 29134 : if (relkind == RELKIND_RELATION ||
4437 650 : relkind == RELKIND_MATVIEW ||
4438 : relkind == RELKIND_PARTITIONED_TABLE)
4439 : {
4440 28520 : resultRelInfo->ri_RowIdAttNo =
4441 28520 : ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
4442 28520 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4443 0 : elog(ERROR, "could not find junk ctid column");
4444 : }
4445 614 : else if (relkind == RELKIND_FOREIGN_TABLE)
4446 : {
4447 : /*
4448 : * We don't support MERGE with foreign tables for now. (It's
4449 : * problematic because the implementation uses CTID.)
4450 : */
4451 : Assert(operation != CMD_MERGE);
4452 :
4453 : /*
4454 : * When there is a row-level trigger, there should be a
4455 : * wholerow attribute. We also require it to be present in
4456 : * UPDATE and MERGE, so we can get the values of unchanged
4457 : * columns.
4458 : */
4459 344 : resultRelInfo->ri_RowIdAttNo =
4460 344 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4461 : "wholerow");
4462 344 : if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
4463 194 : !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4464 0 : elog(ERROR, "could not find junk wholerow column");
4465 : }
4466 : else
4467 : {
4468 : /* Other valid target relkinds must provide wholerow */
4469 270 : resultRelInfo->ri_RowIdAttNo =
4470 270 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4471 : "wholerow");
4472 270 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4473 0 : elog(ERROR, "could not find junk wholerow column");
4474 : }
4475 : }
4476 : }
4477 :
4478 : /*
4479 : * If this is an inherited update/delete/merge, there will be a junk
4480 : * attribute named "tableoid" present in the subplan's targetlist. It
4481 : * will be used to identify the result relation for a given tuple to be
4482 : * updated/deleted/merged.
4483 : */
4484 119014 : mtstate->mt_resultOidAttno =
4485 119014 : ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
4486 : Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || nrels == 1);
4487 119014 : mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
4488 119014 : mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
4489 :
4490 : /* Get the root target relation */
4491 119014 : rel = mtstate->rootResultRelInfo->ri_RelationDesc;
4492 :
4493 : /*
4494 : * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
4495 : * or MERGE might need this too, but only if it actually moves tuples
4496 : * between partitions; in that case setup is done by
4497 : * ExecCrossPartitionUpdate.
4498 : */
4499 119014 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
4500 : operation == CMD_INSERT)
4501 5858 : mtstate->mt_partition_tuple_routing =
4502 5858 : ExecSetupPartitionTupleRouting(estate, rel);
4503 :
4504 : /*
4505 : * Initialize any WITH CHECK OPTION constraints if needed.
4506 : */
4507 119014 : resultRelInfo = mtstate->resultRelInfo;
4508 120348 : foreach(l, node->withCheckOptionLists)
4509 : {
4510 1334 : List *wcoList = (List *) lfirst(l);
4511 1334 : List *wcoExprs = NIL;
4512 : ListCell *ll;
4513 :
4514 3632 : foreach(ll, wcoList)
4515 : {
4516 2298 : WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
4517 2298 : ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
4518 : &mtstate->ps);
4519 :
4520 2298 : wcoExprs = lappend(wcoExprs, wcoExpr);
4521 : }
4522 :
4523 1334 : resultRelInfo->ri_WithCheckOptions = wcoList;
4524 1334 : resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
4525 1334 : resultRelInfo++;
4526 : }
4527 :
4528 : /*
4529 : * Initialize RETURNING projections if needed.
4530 : */
4531 119014 : if (node->returningLists)
4532 : {
4533 : TupleTableSlot *slot;
4534 : ExprContext *econtext;
4535 :
4536 : /*
4537 : * Initialize result tuple slot and assign its rowtype using the first
4538 : * RETURNING list. We assume the rest will look the same.
4539 : */
4540 4200 : mtstate->ps.plan->targetlist = (List *) linitial(node->returningLists);
4541 :
4542 : /* Set up a slot for the output of the RETURNING projection(s) */
4543 4200 : ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
4544 4200 : slot = mtstate->ps.ps_ResultTupleSlot;
4545 :
4546 : /* Need an econtext too */
4547 4200 : if (mtstate->ps.ps_ExprContext == NULL)
4548 4200 : ExecAssignExprContext(estate, &mtstate->ps);
4549 4200 : econtext = mtstate->ps.ps_ExprContext;
4550 :
4551 : /*
4552 : * Build a projection for each result rel.
4553 : */
4554 4200 : resultRelInfo = mtstate->resultRelInfo;
4555 8728 : foreach(l, node->returningLists)
4556 : {
4557 4528 : List *rlist = (List *) lfirst(l);
4558 :
4559 4528 : resultRelInfo->ri_returningList = rlist;
4560 4528 : resultRelInfo->ri_projectReturning =
4561 4528 : ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
4562 4528 : resultRelInfo->ri_RelationDesc->rd_att);
4563 4528 : resultRelInfo++;
4564 : }
4565 : }
4566 : else
4567 : {
4568 : /*
4569 : * We still must construct a dummy result tuple type, because InitPlan
4570 : * expects one (maybe should change that?).
4571 : */
4572 114814 : mtstate->ps.plan->targetlist = NIL;
4573 114814 : ExecInitResultTypeTL(&mtstate->ps);
4574 :
4575 114814 : mtstate->ps.ps_ExprContext = NULL;
4576 : }
4577 :
4578 : /* Set the list of arbiter indexes if needed for ON CONFLICT */
4579 119014 : resultRelInfo = mtstate->resultRelInfo;
4580 119014 : if (node->onConflictAction != ONCONFLICT_NONE)
4581 : {
4582 : /* insert may only have one relation, inheritance is not expanded */
4583 : Assert(nrels == 1);
4584 1344 : resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
4585 : }
4586 :
4587 : /*
4588 : * If needed, Initialize target list, projection and qual for ON CONFLICT
4589 : * DO UPDATE.
4590 : */
4591 119014 : if (node->onConflictAction == ONCONFLICT_UPDATE)
4592 : {
4593 900 : OnConflictSetState *onconfl = makeNode(OnConflictSetState);
4594 : ExprContext *econtext;
4595 : TupleDesc relationDesc;
4596 :
4597 : /* already exists if created by RETURNING processing above */
4598 900 : if (mtstate->ps.ps_ExprContext == NULL)
4599 632 : ExecAssignExprContext(estate, &mtstate->ps);
4600 :
4601 900 : econtext = mtstate->ps.ps_ExprContext;
4602 900 : relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
4603 :
4604 : /* create state for DO UPDATE SET operation */
4605 900 : resultRelInfo->ri_onConflict = onconfl;
4606 :
4607 : /* initialize slot for the existing tuple */
4608 900 : onconfl->oc_Existing =
4609 900 : table_slot_create(resultRelInfo->ri_RelationDesc,
4610 900 : &mtstate->ps.state->es_tupleTable);
4611 :
4612 : /*
4613 : * Create the tuple slot for the UPDATE SET projection. We want a slot
4614 : * of the table's type here, because the slot will be used to insert
4615 : * into the table, and for RETURNING processing - which may access
4616 : * system attributes.
4617 : */
4618 900 : onconfl->oc_ProjSlot =
4619 900 : table_slot_create(resultRelInfo->ri_RelationDesc,
4620 900 : &mtstate->ps.state->es_tupleTable);
4621 :
4622 : /* build UPDATE SET projection state */
4623 900 : onconfl->oc_ProjInfo =
4624 900 : ExecBuildUpdateProjection(node->onConflictSet,
4625 : true,
4626 : node->onConflictCols,
4627 : relationDesc,
4628 : econtext,
4629 : onconfl->oc_ProjSlot,
4630 : &mtstate->ps);
4631 :
4632 : /* initialize state to evaluate the WHERE clause, if any */
4633 900 : if (node->onConflictWhere)
4634 : {
4635 : ExprState *qualexpr;
4636 :
4637 176 : qualexpr = ExecInitQual((List *) node->onConflictWhere,
4638 : &mtstate->ps);
4639 176 : onconfl->oc_WhereClause = qualexpr;
4640 : }
4641 : }
4642 :
4643 : /*
4644 : * If we have any secondary relations in an UPDATE or DELETE, they need to
4645 : * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
4646 : * EvalPlanQual mechanism needs to be told about them. This also goes for
4647 : * the source relations in a MERGE. Locate the relevant ExecRowMarks.
4648 : */
4649 119014 : arowmarks = NIL;
4650 121574 : foreach(l, node->rowMarks)
4651 : {
4652 2560 : PlanRowMark *rc = lfirst_node(PlanRowMark, l);
4653 : ExecRowMark *erm;
4654 : ExecAuxRowMark *aerm;
4655 :
4656 : /* ignore "parent" rowmarks; they are irrelevant at runtime */
4657 2560 : if (rc->isParent)
4658 100 : continue;
4659 :
4660 : /* Find ExecRowMark and build ExecAuxRowMark */
4661 2460 : erm = ExecFindRowMark(estate, rc->rti, false);
4662 2460 : aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
4663 2460 : arowmarks = lappend(arowmarks, aerm);
4664 : }
4665 :
4666 : /* For a MERGE command, initialize its state */
4667 119014 : if (mtstate->operation == CMD_MERGE)
4668 1432 : ExecInitMerge(mtstate, estate);
4669 :
4670 119014 : EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
4671 :
4672 : /*
4673 : * If there are a lot of result relations, use a hash table to speed the
4674 : * lookups. If there are not a lot, a simple linear search is faster.
4675 : *
4676 : * It's not clear where the threshold is, but try 64 for starters. In a
4677 : * debugging build, use a small threshold so that we get some test
4678 : * coverage of both code paths.
4679 : */
4680 : #ifdef USE_ASSERT_CHECKING
4681 : #define MT_NRELS_HASH 4
4682 : #else
4683 : #define MT_NRELS_HASH 64
4684 : #endif
4685 119014 : if (nrels >= MT_NRELS_HASH)
4686 : {
4687 : HASHCTL hash_ctl;
4688 :
4689 0 : hash_ctl.keysize = sizeof(Oid);
4690 0 : hash_ctl.entrysize = sizeof(MTTargetRelLookup);
4691 0 : hash_ctl.hcxt = CurrentMemoryContext;
4692 0 : mtstate->mt_resultOidHash =
4693 0 : hash_create("ModifyTable target hash",
4694 : nrels, &hash_ctl,
4695 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
4696 0 : for (i = 0; i < nrels; i++)
4697 : {
4698 : Oid hashkey;
4699 : MTTargetRelLookup *mtlookup;
4700 : bool found;
4701 :
4702 0 : resultRelInfo = &mtstate->resultRelInfo[i];
4703 0 : hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
4704 : mtlookup = (MTTargetRelLookup *)
4705 0 : hash_search(mtstate->mt_resultOidHash, &hashkey,
4706 : HASH_ENTER, &found);
4707 : Assert(!found);
4708 0 : mtlookup->relationIndex = i;
4709 : }
4710 : }
4711 : else
4712 119014 : mtstate->mt_resultOidHash = NULL;
4713 :
4714 : /*
4715 : * Determine if the FDW supports batch insert and determine the batch size
4716 : * (a FDW may support batching, but it may be disabled for the
4717 : * server/table).
4718 : *
4719 : * We only do this for INSERT, so that for UPDATE/DELETE the batch size
4720 : * remains set to 0.
4721 : */
4722 119014 : if (operation == CMD_INSERT)
4723 : {
4724 : /* insert may only have one relation, inheritance is not expanded */
4725 : Assert(nrels == 1);
4726 92196 : resultRelInfo = mtstate->resultRelInfo;
4727 92196 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4728 92196 : resultRelInfo->ri_FdwRoutine != NULL &&
4729 174 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
4730 174 : resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
4731 : {
4732 174 : resultRelInfo->ri_BatchSize =
4733 174 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
4734 174 : Assert(resultRelInfo->ri_BatchSize >= 1);
4735 : }
4736 : else
4737 92022 : resultRelInfo->ri_BatchSize = 1;
4738 : }
4739 :
4740 : /*
4741 : * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
4742 : * to estate->es_auxmodifytables so that it will be run to completion by
4743 : * ExecPostprocessPlan. (It'd actually work fine to add the primary
4744 : * ModifyTable node too, but there's no need.) Note the use of lcons not
4745 : * lappend: we need later-initialized ModifyTable nodes to be shut down
4746 : * before earlier ones. This ensures that we don't throw away RETURNING
4747 : * rows that need to be seen by a later CTE subplan.
4748 : */
4749 119014 : if (!mtstate->canSetTag)
4750 912 : estate->es_auxmodifytables = lcons(mtstate,
4751 : estate->es_auxmodifytables);
4752 :
4753 119014 : return mtstate;
4754 : }
4755 :
4756 : /* ----------------------------------------------------------------
4757 : * ExecEndModifyTable
4758 : *
4759 : * Shuts down the plan.
4760 : *
4761 : * Returns nothing of interest.
4762 : * ----------------------------------------------------------------
4763 : */
4764 : void
4765 114764 : ExecEndModifyTable(ModifyTableState *node)
4766 : {
4767 : int i;
4768 :
4769 : /*
4770 : * Allow any FDWs to shut down
4771 : */
4772 231542 : for (i = 0; i < node->mt_nrels; i++)
4773 : {
4774 : int j;
4775 116778 : ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
4776 :
4777 116778 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4778 116586 : resultRelInfo->ri_FdwRoutine != NULL &&
4779 290 : resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
4780 290 : resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
4781 : resultRelInfo);
4782 :
4783 : /*
4784 : * Cleanup the initialized batch slots. This only matters for FDWs
4785 : * with batching, but the other cases will have ri_NumSlotsInitialized
4786 : * == 0.
4787 : */
4788 116834 : for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
4789 : {
4790 56 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
4791 56 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
4792 : }
4793 : }
4794 :
4795 : /*
4796 : * Close all the partitioned tables, leaf partitions, and their indices
4797 : * and release the slot used for tuple routing, if set.
4798 : */
4799 114764 : if (node->mt_partition_tuple_routing)
4800 : {
4801 5870 : ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
4802 :
4803 5870 : if (node->mt_root_tuple_slot)
4804 566 : ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
4805 : }
4806 :
4807 : /*
4808 : * Terminate EPQ execution if active
4809 : */
4810 114764 : EvalPlanQualEnd(&node->mt_epqstate);
4811 :
4812 : /*
4813 : * shut down subplan
4814 : */
4815 114764 : ExecEndNode(outerPlanState(node));
4816 114764 : }
4817 :
4818 : void
4819 0 : ExecReScanModifyTable(ModifyTableState *node)
4820 : {
4821 : /*
4822 : * Currently, we don't need to support rescan on ModifyTable nodes. The
4823 : * semantics of that would be a bit debatable anyway.
4824 : */
4825 0 : elog(ERROR, "ExecReScanModifyTable is not implemented");
4826 : }
|