Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeModifyTable.c
4 : * routines to handle ModifyTable nodes.
5 : *
6 : * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/executor/nodeModifyTable.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : /* INTERFACE ROUTINES
16 : * ExecInitModifyTable - initialize the ModifyTable node
17 : * ExecModifyTable - retrieve the next tuple from the node
18 : * ExecEndModifyTable - shut down the ModifyTable node
19 : * ExecReScanModifyTable - rescan the ModifyTable node
20 : *
21 : * NOTES
22 : * The ModifyTable node receives input from its outerPlan, which is
23 : * the data to insert for INSERT cases, the changed columns' new
24 : * values plus row-locating info for UPDATE and MERGE cases, or just the
25 : * row-locating info for DELETE cases.
26 : *
27 : * MERGE runs a join between the source relation and the target
28 : * table; if any WHEN NOT MATCHED clauses are present, then the
29 : * join is an outer join. In this case, any unmatched tuples will
30 : * have NULL row-locating info, and only INSERT can be run. But for
31 : * matched tuples, the row-locating info is used to determine the
32 : * tuple to UPDATE or DELETE. When all clauses are WHEN MATCHED,
33 : * then an inner join is used, so all tuples contain row-locating info.
34 : *
35 : * If the query specifies RETURNING, then the ModifyTable returns a
36 : * RETURNING tuple after completing each row insert, update, or delete.
37 : * It must be called again to continue the operation. Without RETURNING,
38 : * we just loop within the node until all the work is done, then
39 : * return NULL. This avoids useless call/return overhead. (MERGE does
40 : * not support RETURNING.)
41 : */
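
A minimal sketch of the RETURNING protocol described above, assuming a caller that holds the node's ModifyTableState as mtstate: each ExecProcNode() call performs one row modification and yields its projected RETURNING tuple, and NULL signals that all work is done (without RETURNING, the first call does everything internally and returns NULL).

    TupleTableSlot *slot;

    while ((slot = ExecProcNode(&mtstate->ps)) != NULL)
    {
        /* consume one projected RETURNING tuple, e.g. send it to the client */
    }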
42 :
43 : #include "postgres.h"
44 :
45 : #include "access/heapam.h"
46 : #include "access/htup_details.h"
47 : #include "access/tableam.h"
48 : #include "access/xact.h"
49 : #include "catalog/catalog.h"
50 : #include "commands/trigger.h"
51 : #include "executor/execPartition.h"
52 : #include "executor/executor.h"
53 : #include "executor/nodeModifyTable.h"
54 : #include "foreign/fdwapi.h"
55 : #include "miscadmin.h"
56 : #include "nodes/nodeFuncs.h"
57 : #include "optimizer/optimizer.h"
58 : #include "rewrite/rewriteHandler.h"
59 : #include "storage/bufmgr.h"
60 : #include "storage/lmgr.h"
61 : #include "utils/builtins.h"
62 : #include "utils/datum.h"
63 : #include "utils/memutils.h"
64 : #include "utils/rel.h"
65 :
66 :
67 : typedef struct MTTargetRelLookup
68 : {
69 : Oid relationOid; /* hash key, must be first */
70 : int relationIndex; /* rel's index in resultRelInfo[] array */
71 : } MTTargetRelLookup;
72 :
73 : /*
74 : * Context struct for a ModifyTable operation, containing basic execution
75 : * state and some output variables populated by ExecUpdateAct() and
76 : * ExecDeleteAct() to report the result of their actions to callers.
77 : */
78 : typedef struct ModifyTableContext
79 : {
80 : /* Operation state */
81 : ModifyTableState *mtstate;
82 : EPQState *epqstate;
83 : EState *estate;
84 :
85 : /*
86 : * Slot containing tuple obtained from ModifyTable's subplan. Used to
87 : * access "junk" columns that are not going to be stored.
88 : */
89 : TupleTableSlot *planSlot;
90 :
91 : /* MERGE specific */
92 : MergeActionState *relaction; /* MERGE action in progress */
93 :
94 : /*
95 : * Information about the changes that were made concurrently to a tuple
96 : * being updated or deleted
97 : */
98 : TM_FailureData tmfd;
99 :
100 : /*
101 : * The tuple projected by the INSERT's RETURNING clause, when doing a
102 : * cross-partition UPDATE
103 : */
104 : TupleTableSlot *cpUpdateReturningSlot;
105 : } ModifyTableContext;
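
A minimal sketch of how such a context is typically filled in before the per-row subroutines run, assuming node is the ModifyTableState (mt_epqstate and ps.state are fields of ModifyTableState):

    ModifyTableContext context;

    context.mtstate = node;
    context.epqstate = &node->mt_epqstate;
    context.estate = node->ps.state;
    /* context.planSlot is then set per row from the subplan's output */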
106 :
107 : /*
108 : * Context struct containing output data specific to UPDATE operations.
109 : */
110 : typedef struct UpdateContext
111 : {
112 : bool updated; /* did UPDATE actually occur? */
113 : bool crossPartUpdate; /* was it a cross-partition update? */
114 : TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
115 :
116 : /*
117 : * Lock mode to acquire on the latest tuple version before performing
118 : * EvalPlanQual on it
119 : */
120 : LockTupleMode lockmode;
121 : } UpdateContext;
122 :
123 :
124 : static void ExecBatchInsert(ModifyTableState *mtstate,
125 : ResultRelInfo *resultRelInfo,
126 : TupleTableSlot **slots,
127 : TupleTableSlot **planSlots,
128 : int numSlots,
129 : EState *estate,
130 : bool canSetTag);
131 : static void ExecPendingInserts(EState *estate);
132 : static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
133 : ResultRelInfo *sourcePartInfo,
134 : ResultRelInfo *destPartInfo,
135 : ItemPointer tupleid,
136 : TupleTableSlot *oldslot,
137 : TupleTableSlot *newslot);
138 : static bool ExecOnConflictUpdate(ModifyTableContext *context,
139 : ResultRelInfo *resultRelInfo,
140 : ItemPointer conflictTid,
141 : TupleTableSlot *excludedSlot,
142 : bool canSetTag,
143 : TupleTableSlot **returning);
144 : static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
145 : EState *estate,
146 : PartitionTupleRouting *proute,
147 : ResultRelInfo *targetRelInfo,
148 : TupleTableSlot *slot,
149 : ResultRelInfo **partRelInfo);
150 :
151 : static TupleTableSlot *ExecMerge(ModifyTableContext *context,
152 : ResultRelInfo *resultRelInfo,
153 : ItemPointer tupleid,
154 : bool canSetTag);
155 : static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
156 : static bool ExecMergeMatched(ModifyTableContext *context,
157 : ResultRelInfo *resultRelInfo,
158 : ItemPointer tupleid,
159 : bool canSetTag);
160 : static void ExecMergeNotMatched(ModifyTableContext *context,
161 : ResultRelInfo *resultRelInfo,
162 : bool canSetTag);
163 :
164 :
165 : /*
166 : * Verify that the tuples to be produced by INSERT match the
167 : * target relation's rowtype
168 : *
169 : * We do this to guard against stale plans. If plan invalidation is
170 : * functioning properly then we should never get a failure here, but better
171 : * safe than sorry. Note that this is called after we have obtained lock
172 : * on the target rel, so the rowtype can't change underneath us.
173 : *
174 : * The plan output is represented by its targetlist, because that makes
175 : * handling the dropped-column case easier.
176 : *
177 : * We used to use this for UPDATE as well, but now the equivalent checks
178 : * are done in ExecBuildUpdateProjection.
179 : */
180 : static void
181 102398 : ExecCheckPlanOutput(Relation resultRel, List *targetList)
182 : {
183 102398 : TupleDesc resultDesc = RelationGetDescr(resultRel);
184 102398 : int attno = 0;
185 : ListCell *lc;
186 :
187 400022 : foreach(lc, targetList)
188 : {
189 297624 : TargetEntry *tle = (TargetEntry *) lfirst(lc);
190 : Form_pg_attribute attr;
191 :
192 : Assert(!tle->resjunk); /* caller removed junk items already */
193 :
194 297624 : if (attno >= resultDesc->natts)
195 0 : ereport(ERROR,
196 : (errcode(ERRCODE_DATATYPE_MISMATCH),
197 : errmsg("table row type and query-specified row type do not match"),
198 : errdetail("Query has too many columns.")));
199 297624 : attr = TupleDescAttr(resultDesc, attno);
200 297624 : attno++;
201 :
202 297624 : if (!attr->attisdropped)
203 : {
204 : /* Normal case: demand type match */
205 297014 : if (exprType((Node *) tle->expr) != attr->atttypid)
206 0 : ereport(ERROR,
207 : (errcode(ERRCODE_DATATYPE_MISMATCH),
208 : errmsg("table row type and query-specified row type do not match"),
209 : errdetail("Table has type %s at ordinal position %d, but query expects %s.",
210 : format_type_be(attr->atttypid),
211 : attno,
212 : format_type_be(exprType((Node *) tle->expr)))));
213 : }
214 : else
215 : {
216 : /*
217 : * For a dropped column, we can't check atttypid (it's likely 0).
218 : * In any case the planner has most likely inserted an INT4 null.
219 : * What we insist on is just *some* NULL constant.
220 : */
221 610 : if (!IsA(tle->expr, Const) ||
222 610 : !((Const *) tle->expr)->constisnull)
223 0 : ereport(ERROR,
224 : (errcode(ERRCODE_DATATYPE_MISMATCH),
225 : errmsg("table row type and query-specified row type do not match"),
226 : errdetail("Query provides a value for a dropped column at ordinal position %d.",
227 : attno)));
228 : }
229 : }
230 102398 : if (attno != resultDesc->natts)
231 0 : ereport(ERROR,
232 : (errcode(ERRCODE_DATATYPE_MISMATCH),
233 : errmsg("table row type and query-specified row type do not match"),
234 : errdetail("Query has too few columns.")));
235 102398 : }
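
For context on the dropped-column branch above, a hedged sketch of the sort of placeholder the planner is expected to emit there; makeNullConst() is the stock constructor, and the check only insists that the expression be some NULL Const:

    #include "catalog/pg_type.h"
    #include "nodes/makefuncs.h"

    /* a NULL constant of type int4, typmod -1, no collation */
    Const  *placeholder = makeNullConst(INT4OID, -1, InvalidOid);

    Assert(IsA(placeholder, Const) && placeholder->constisnull);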
236 :
237 : /*
238 : * ExecProcessReturning --- evaluate a RETURNING list
239 : *
240 : * resultRelInfo: current result rel
241 : * tupleSlot: slot holding tuple actually inserted/updated/deleted
242 : * planSlot: slot holding tuple returned by top subplan node
243 : *
244 : * Note: If tupleSlot is NULL, the FDW should have already provided econtext's
245 : * scan tuple.
246 : *
247 : * Returns a slot holding the result tuple
248 : */
249 : static TupleTableSlot *
250 7018 : ExecProcessReturning(ResultRelInfo *resultRelInfo,
251 : TupleTableSlot *tupleSlot,
252 : TupleTableSlot *planSlot)
253 : {
254 7018 : ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
255 7018 : ExprContext *econtext = projectReturning->pi_exprContext;
256 :
257 : /* Make tuple and any needed join variables available to ExecProject */
258 7018 : if (tupleSlot)
259 6324 : econtext->ecxt_scantuple = tupleSlot;
260 7018 : econtext->ecxt_outertuple = planSlot;
261 :
262 : /*
263 : * RETURNING expressions might reference the tableoid column, so
264 : * reinitialize tts_tableOid before evaluating them.
265 : */
266 7018 : econtext->ecxt_scantuple->tts_tableOid =
267 7018 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
268 :
269 : /* Compute the RETURNING expressions */
270 7018 : return ExecProject(projectReturning);
271 : }
272 :
273 : /*
274 : * ExecCheckTupleVisible -- verify tuple is visible
275 : *
276 : * It would not be consistent with guarantees of the higher isolation levels to
277 : * proceed with avoiding insertion (taking speculative insertion's alternative
278 : * path) on the basis of another tuple that is not visible to the MVCC snapshot.
279 : * Check for the need to raise a serialization failure, and do so as necessary.
280 : */
281 : static void
282 5240 : ExecCheckTupleVisible(EState *estate,
283 : Relation rel,
284 : TupleTableSlot *slot)
285 : {
286 5240 : if (!IsolationUsesXactSnapshot())
287 5176 : return;
288 :
289 64 : if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
290 : {
291 : Datum xminDatum;
292 : TransactionId xmin;
293 : bool isnull;
294 :
295 40 : xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
296 : Assert(!isnull);
297 40 : xmin = DatumGetTransactionId(xminDatum);
298 :
299 : /*
300 : * We should not raise a serialization failure if the conflict is
301 : * against a tuple inserted by our own transaction, even if it's not
302 : * visible to our snapshot. (This would happen, for example, if
303 : * conflicting keys are proposed for insertion in a single command.)
304 : */
305 40 : if (!TransactionIdIsCurrentTransactionId(xmin))
306 20 : ereport(ERROR,
307 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
308 : errmsg("could not serialize access due to concurrent update")));
309 : }
310 : }
311 :
312 : /*
313 : * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
314 : */
315 : static void
316 158 : ExecCheckTIDVisible(EState *estate,
317 : ResultRelInfo *relinfo,
318 : ItemPointer tid,
319 : TupleTableSlot *tempSlot)
320 : {
321 158 : Relation rel = relinfo->ri_RelationDesc;
322 :
323 : /* Redundantly check isolation level */
324 158 : if (!IsolationUsesXactSnapshot())
325 94 : return;
326 :
327 64 : if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
328 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
329 64 : ExecCheckTupleVisible(estate, rel, tempSlot);
330 44 : ExecClearTuple(tempSlot);
331 : }
332 :
333 : /*
334 : * Initialize to compute stored generated columns for a tuple
335 : *
336 : * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI
337 : * or ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
338 : * If cmdtype == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
339 : *
340 : * Note: usually, a given query would need only one of ri_GeneratedExprsI and
341 : * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
342 : * cross-partition UPDATEs, since a partition might be the target of both
343 : * UPDATE and INSERT actions.
344 : */
345 : void
346 59460 : ExecInitStoredGenerated(ResultRelInfo *resultRelInfo,
347 : EState *estate,
348 : CmdType cmdtype)
349 : {
350 59460 : Relation rel = resultRelInfo->ri_RelationDesc;
351 59460 : TupleDesc tupdesc = RelationGetDescr(rel);
352 59460 : int natts = tupdesc->natts;
353 : ExprState **ri_GeneratedExprs;
354 : int ri_NumGeneratedNeeded;
355 : Bitmapset *updatedCols;
356 : MemoryContext oldContext;
357 :
358 : /* Nothing to do if no generated columns */
359 59460 : if (!(tupdesc->constr && tupdesc->constr->has_generated_stored))
360 58550 : return;
361 :
362 : /*
363 : * In an UPDATE, we can skip computing any generated columns that do not
364 : * depend on any UPDATE target column. But if there is a BEFORE ROW
365 : * UPDATE trigger, we cannot skip because the trigger might change more
366 : * columns.
367 : */
368 910 : if (cmdtype == CMD_UPDATE &&
369 228 : !(rel->trigdesc && rel->trigdesc->trig_update_before_row))
370 202 : updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
371 : else
372 708 : updatedCols = NULL;
373 :
374 : /*
375 : * Make sure these data structures are built in the per-query memory
376 : * context so they'll survive throughout the query.
377 : */
378 910 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
379 :
380 910 : ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
381 910 : ri_NumGeneratedNeeded = 0;
382 :
383 3502 : for (int i = 0; i < natts; i++)
384 : {
385 2592 : if (TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED)
386 : {
387 : Expr *expr;
388 :
389 : /* Fetch the GENERATED AS expression tree */
390 930 : expr = (Expr *) build_column_default(rel, i + 1);
391 930 : if (expr == NULL)
392 0 : elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
393 : i + 1, RelationGetRelationName(rel));
394 :
395 : /*
396 : * If it's an update with a known set of update target columns,
397 : * see if we can skip the computation.
398 : */
399 930 : if (updatedCols)
400 : {
401 208 : Bitmapset *attrs_used = NULL;
402 :
403 208 : pull_varattnos((Node *) expr, 1, &attrs_used);
404 :
405 208 : if (!bms_overlap(updatedCols, attrs_used))
406 24 : continue; /* need not update this column */
407 : }
408 :
409 : /* No luck, so prepare the expression for execution */
410 906 : ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
411 906 : ri_NumGeneratedNeeded++;
412 :
413 : /* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
414 906 : if (cmdtype == CMD_UPDATE)
415 210 : resultRelInfo->ri_extraUpdatedCols =
416 210 : bms_add_member(resultRelInfo->ri_extraUpdatedCols,
417 : i + 1 - FirstLowInvalidHeapAttributeNumber);
418 : }
419 : }
420 :
421 : /* Save in appropriate set of fields */
422 910 : if (cmdtype == CMD_UPDATE)
423 : {
424 : /* Don't call twice */
425 : Assert(resultRelInfo->ri_GeneratedExprsU == NULL);
426 :
427 228 : resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
428 228 : resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;
429 : }
430 : else
431 : {
432 : /* Don't call twice */
433 : Assert(resultRelInfo->ri_GeneratedExprsI == NULL);
434 :
435 682 : resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
436 682 : resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
437 : }
438 :
439 910 : MemoryContextSwitchTo(oldContext);
440 : }
441 :
442 : /*
443 : * Compute stored generated columns for a tuple
444 : */
445 : void
446 1202 : ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
447 : EState *estate, TupleTableSlot *slot,
448 : CmdType cmdtype)
449 : {
450 1202 : Relation rel = resultRelInfo->ri_RelationDesc;
451 1202 : TupleDesc tupdesc = RelationGetDescr(rel);
452 1202 : int natts = tupdesc->natts;
453 1202 : ExprContext *econtext = GetPerTupleExprContext(estate);
454 : ExprState **ri_GeneratedExprs;
455 : MemoryContext oldContext;
456 : Datum *values;
457 : bool *nulls;
458 :
459 : /* We should not be called unless this is true */
460 : Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);
461 :
462 : /*
463 : * Initialize the expressions if we didn't already, and check whether we
464 : * can exit early because nothing needs to be computed.
465 : */
466 1202 : if (cmdtype == CMD_UPDATE)
467 : {
468 264 : if (resultRelInfo->ri_GeneratedExprsU == NULL)
469 202 : ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
470 264 : if (resultRelInfo->ri_NumGeneratedNeededU == 0)
471 18 : return;
472 246 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
473 : }
474 : else
475 : {
476 938 : if (resultRelInfo->ri_GeneratedExprsI == NULL)
477 682 : ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
478 : /* Early exit is impossible given the prior Assert */
479 : Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
480 938 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
481 : }
482 :
483 1184 : oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
484 :
485 1184 : values = palloc(sizeof(*values) * natts);
486 1184 : nulls = palloc(sizeof(*nulls) * natts);
487 :
488 1184 : slot_getallattrs(slot);
489 1184 : memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);
490 :
491 4442 : for (int i = 0; i < natts; i++)
492 : {
493 3270 : Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
494 :
495 3270 : if (ri_GeneratedExprs[i])
496 : {
497 : Datum val;
498 : bool isnull;
499 :
500 : Assert(attr->attgenerated == ATTRIBUTE_GENERATED_STORED);
501 :
502 1198 : econtext->ecxt_scantuple = slot;
503 :
504 1198 : val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);
505 :
506 : /*
507 : * We must make a copy of val as we have no guarantees about where
508 : * memory for a pass-by-reference Datum is located.
509 : */
510 1186 : if (!isnull)
511 1144 : val = datumCopy(val, attr->attbyval, attr->attlen);
512 :
513 1186 : values[i] = val;
514 1186 : nulls[i] = isnull;
515 : }
516 : else
517 : {
518 2072 : if (!nulls[i])
519 2030 : values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
520 : }
521 : }
522 :
523 1172 : ExecClearTuple(slot);
524 1172 : memcpy(slot->tts_values, values, sizeof(*values) * natts);
525 1172 : memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
526 1172 : ExecStoreVirtualTuple(slot);
527 1172 : ExecMaterializeSlot(slot);
528 :
529 1172 : MemoryContextSwitchTo(oldContext);
530 : }
531 :
532 : /*
533 : * ExecInitInsertProjection
534 : * Do one-time initialization of projection data for INSERT tuples.
535 : *
536 : * INSERT queries may need a projection to filter out junk attrs in the tlist.
537 : *
538 : * This is also a convenient place to verify that the
539 : * output of an INSERT matches the target table.
540 : */
541 : static void
542 101772 : ExecInitInsertProjection(ModifyTableState *mtstate,
543 : ResultRelInfo *resultRelInfo)
544 : {
545 101772 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
546 101772 : Plan *subplan = outerPlan(node);
547 101772 : EState *estate = mtstate->ps.state;
548 101772 : List *insertTargetList = NIL;
549 101772 : bool need_projection = false;
550 : ListCell *l;
551 :
552 : /* Extract non-junk columns of the subplan's result tlist. */
553 397782 : foreach(l, subplan->targetlist)
554 : {
555 296010 : TargetEntry *tle = (TargetEntry *) lfirst(l);
556 :
557 296010 : if (!tle->resjunk)
558 296010 : insertTargetList = lappend(insertTargetList, tle);
559 : else
560 0 : need_projection = true;
561 : }
562 :
563 : /*
564 : * The junk-free list must produce a tuple suitable for the result
565 : * relation.
566 : */
567 101772 : ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);
568 :
569 : /* We'll need a slot matching the table's format. */
570 101772 : resultRelInfo->ri_newTupleSlot =
571 101772 : table_slot_create(resultRelInfo->ri_RelationDesc,
572 : &estate->es_tupleTable);
573 :
574 : /* Build ProjectionInfo if needed (it probably isn't). */
575 101772 : if (need_projection)
576 : {
577 0 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
578 :
579 : /* need an expression context to do the projection */
580 0 : if (mtstate->ps.ps_ExprContext == NULL)
581 0 : ExecAssignExprContext(estate, &mtstate->ps);
582 :
583 0 : resultRelInfo->ri_projectNew =
584 0 : ExecBuildProjectionInfo(insertTargetList,
585 : mtstate->ps.ps_ExprContext,
586 : resultRelInfo->ri_newTupleSlot,
587 : &mtstate->ps,
588 : relDesc);
589 : }
590 :
591 101772 : resultRelInfo->ri_projectNewInfoValid = true;
592 101772 : }
593 :
594 : /*
595 : * ExecInitUpdateProjection
596 : * Do one-time initialization of projection data for UPDATE tuples.
597 : *
598 : * UPDATE always needs a projection, because (1) there are always some junk
599 : * attrs, and (2) we may need to merge values of not-updated columns from
600 : * the old tuple into the final tuple. In UPDATE, the tuple arriving from
601 : * the subplan contains only new values for the changed columns, plus row
602 : * identity info in the junk attrs.
603 : *
604 : * This is "one-time" for any given result rel, but we might touch more than
605 : * one result rel in the course of an inherited UPDATE, and each one needs
606 : * its own projection due to possible column order variation.
607 : *
608 : * This is also a convenient place to verify that the output of an UPDATE
609 : * matches the target table (ExecBuildUpdateProjection does that).
610 : */
611 : static void
612 15206 : ExecInitUpdateProjection(ModifyTableState *mtstate,
613 : ResultRelInfo *resultRelInfo)
614 : {
615 15206 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
616 15206 : Plan *subplan = outerPlan(node);
617 15206 : EState *estate = mtstate->ps.state;
618 15206 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
619 : int whichrel;
620 : List *updateColnos;
621 :
622 : /*
623 : * Usually, mt_lastResultIndex matches the target rel. If it happens not
624 : * to, we can get the index the hard way with an integer division.
625 : */
626 15206 : whichrel = mtstate->mt_lastResultIndex;
627 15206 : if (resultRelInfo != mtstate->resultRelInfo + whichrel)
628 : {
629 0 : whichrel = resultRelInfo - mtstate->resultRelInfo;
630 : Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
631 : }
632 :
633 15206 : updateColnos = (List *) list_nth(node->updateColnosLists, whichrel);
634 :
635 : /*
636 : * For UPDATE, we use the old tuple to fill up missing values in the tuple
637 : * produced by the subplan to get the new tuple. We need two slots, both
638 : * matching the table's desired format.
639 : */
640 15206 : resultRelInfo->ri_oldTupleSlot =
641 15206 : table_slot_create(resultRelInfo->ri_RelationDesc,
642 : &estate->es_tupleTable);
643 15206 : resultRelInfo->ri_newTupleSlot =
644 15206 : table_slot_create(resultRelInfo->ri_RelationDesc,
645 : &estate->es_tupleTable);
646 :
647 : /* need an expression context to do the projection */
648 15206 : if (mtstate->ps.ps_ExprContext == NULL)
649 14036 : ExecAssignExprContext(estate, &mtstate->ps);
650 :
651 15206 : resultRelInfo->ri_projectNew =
652 15206 : ExecBuildUpdateProjection(subplan->targetlist,
653 : false, /* subplan did the evaluation */
654 : updateColnos,
655 : relDesc,
656 : mtstate->ps.ps_ExprContext,
657 : resultRelInfo->ri_newTupleSlot,
658 : &mtstate->ps);
659 :
660 15206 : resultRelInfo->ri_projectNewInfoValid = true;
661 15206 : }
662 :
663 : /*
664 : * ExecGetInsertNewTuple
665 : * This prepares a "new" tuple ready to be inserted into given result
666 : * relation, by removing any junk columns of the plan's output tuple
667 : * and (if necessary) coercing the tuple to the right tuple format.
668 : */
669 : static TupleTableSlot *
670 12042612 : ExecGetInsertNewTuple(ResultRelInfo *relinfo,
671 : TupleTableSlot *planSlot)
672 : {
673 12042612 : ProjectionInfo *newProj = relinfo->ri_projectNew;
674 : ExprContext *econtext;
675 :
676 : /*
677 : * If there's no projection to be done, just make sure the slot is of the
678 : * right type for the target rel. If the planSlot is the right type we
679 : * can use it as-is, else copy the data into ri_newTupleSlot.
680 : */
681 12042612 : if (newProj == NULL)
682 : {
683 12042612 : if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
684 : {
685 11274712 : ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
686 11274712 : return relinfo->ri_newTupleSlot;
687 : }
688 : else
689 767900 : return planSlot;
690 : }
691 :
692 : /*
693 : * Else project; since the projection output slot is ri_newTupleSlot, this
694 : * will also fix any slot-type problem.
695 : *
696 : * Note: currently, this is dead code, because INSERT cases don't receive
697 : * any junk columns so there's never a projection to be done.
698 : */
699 0 : econtext = newProj->pi_exprContext;
700 0 : econtext->ecxt_outertuple = planSlot;
701 0 : return ExecProject(newProj);
702 : }
703 :
704 : /*
705 : * ExecGetUpdateNewTuple
706 : * This prepares a "new" tuple by combining an UPDATE subplan's output
707 : * tuple (which contains values of changed columns) with unchanged
708 : * columns taken from the old tuple.
709 : *
710 : * The subplan tuple might also contain junk columns, which are ignored.
711 : * Note that the projection also ensures we have a slot of the right type.
712 : */
713 : TupleTableSlot *
714 366792 : ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
715 : TupleTableSlot *planSlot,
716 : TupleTableSlot *oldSlot)
717 : {
718 366792 : ProjectionInfo *newProj = relinfo->ri_projectNew;
719 : ExprContext *econtext;
720 :
721 : /* Use a few extra Asserts to protect against outside callers */
722 : Assert(relinfo->ri_projectNewInfoValid);
723 : Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
724 : Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));
725 :
726 366792 : econtext = newProj->pi_exprContext;
727 366792 : econtext->ecxt_outertuple = planSlot;
728 366792 : econtext->ecxt_scantuple = oldSlot;
729 366792 : return ExecProject(newProj);
730 : }
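
A hedged usage sketch of the routine above, as the per-row UPDATE path might call it (the slot names follow the fields initialized in ExecInitUpdateProjection; context is a ModifyTableContext):

    TupleTableSlot *newslot;

    /* merge changed columns from the subplan with the fetched old row */
    newslot = ExecGetUpdateNewTuple(resultRelInfo,
                                    context->planSlot,               /* new values + junk */
                                    resultRelInfo->ri_oldTupleSlot); /* old row */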
731 :
732 : /* ----------------------------------------------------------------
733 : * ExecInsert
734 : *
735 : * For INSERT, we have to insert the tuple into the target relation
736 : * (or partition thereof) and insert appropriate tuples into the index
737 : * relations.
738 : *
739 : * slot contains the new tuple value to be stored.
740 : *
741 : * Returns RETURNING result if any, otherwise NULL.
742 : * *inserted_tuple is the tuple that's effectively inserted;
743 : * *insert_destrel is the relation where it was inserted.
744 : * These are only set on success.
745 : *
746 : * This may change the currently active tuple conversion map in
747 : * mtstate->mt_transition_capture, so the callers must take care to
748 : * save the previous value to avoid losing track of it.
749 : * ----------------------------------------------------------------
750 : */
751 : static TupleTableSlot *
752 12044928 : ExecInsert(ModifyTableContext *context,
753 : ResultRelInfo *resultRelInfo,
754 : TupleTableSlot *slot,
755 : bool canSetTag,
756 : TupleTableSlot **inserted_tuple,
757 : ResultRelInfo **insert_destrel)
758 : {
759 12044928 : ModifyTableState *mtstate = context->mtstate;
760 12044928 : EState *estate = context->estate;
761 : Relation resultRelationDesc;
762 12044928 : List *recheckIndexes = NIL;
763 12044928 : TupleTableSlot *planSlot = context->planSlot;
764 12044928 : TupleTableSlot *result = NULL;
765 : TransitionCaptureState *ar_insert_trig_tcs;
766 12044928 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
767 12044928 : OnConflictAction onconflict = node->onConflictAction;
768 12044928 : PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
769 : MemoryContext oldContext;
770 :
771 : /*
772 : * If the input result relation is a partitioned table, find the leaf
773 : * partition to insert the tuple into.
774 : */
775 12044928 : if (proute)
776 : {
777 : ResultRelInfo *partRelInfo;
778 :
779 721036 : slot = ExecPrepareTupleRouting(mtstate, estate, proute,
780 : resultRelInfo, slot,
781 : &partRelInfo);
782 720838 : resultRelInfo = partRelInfo;
783 : }
784 :
785 12044730 : ExecMaterializeSlot(slot);
786 :
787 12044730 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
788 :
789 : /*
790 : * Open the table's indexes, if we have not done so already, so that we
791 : * can add new index entries for the inserted tuple.
792 : */
793 12044730 : if (resultRelationDesc->rd_rel->relhasindex &&
794 3247206 : resultRelInfo->ri_IndexRelationDescs == NULL)
795 31210 : ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);
796 :
797 : /*
798 : * BEFORE ROW INSERT Triggers.
799 : *
800 : * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
801 : * INSERT ... ON CONFLICT statement. We cannot check for constraint
802 : * violations before firing these triggers, because they can change the
803 : * values to insert. Also, they can run arbitrary user-defined code with
804 : * side-effects that we can't cancel by just not inserting the tuple.
805 : */
806 12044730 : if (resultRelInfo->ri_TrigDesc &&
807 74382 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)
808 : {
809 : /* Flush any pending inserts, so rows are visible to the triggers */
810 2036 : if (estate->es_insert_pending_result_relations != NIL)
811 6 : ExecPendingInserts(estate);
812 :
813 2036 : if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
814 134 : return NULL; /* "do nothing" */
815 : }
816 :
817 : /* INSTEAD OF ROW INSERT Triggers */
818 12044480 : if (resultRelInfo->ri_TrigDesc &&
819 74132 : resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
820 : {
821 138 : if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
822 6 : return NULL; /* "do nothing" */
823 : }
824 12044342 : else if (resultRelInfo->ri_FdwRoutine)
825 : {
826 : /*
827 : * GENERATED expressions might reference the tableoid column, so
828 : * (re-)initialize tts_tableOid before evaluating them.
829 : */
830 142014 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
831 :
832 : /*
833 : * Compute stored generated columns
834 : */
835 142014 : if (resultRelationDesc->rd_att->constr &&
836 366 : resultRelationDesc->rd_att->constr->has_generated_stored)
837 8 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
838 : CMD_INSERT);
839 :
840 : /*
841 : * If the FDW supports batching, and batching is requested, accumulate
842 : * rows and insert them in batches. Otherwise use the per-row inserts.
843 : */
844 142014 : if (resultRelInfo->ri_BatchSize > 1)
845 : {
846 140288 : bool flushed = false;
847 :
848 : /*
849 : * When we've reached the desired batch size, perform the
850 : * insertion.
851 : */
852 140288 : if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
853 : {
854 22 : ExecBatchInsert(mtstate, resultRelInfo,
855 : resultRelInfo->ri_Slots,
856 : resultRelInfo->ri_PlanSlots,
857 : resultRelInfo->ri_NumSlots,
858 : estate, canSetTag);
859 22 : flushed = true;
860 : }
861 :
862 140288 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
863 :
864 140288 : if (resultRelInfo->ri_Slots == NULL)
865 : {
866 60 : resultRelInfo->ri_Slots = palloc(sizeof(TupleTableSlot *) *
867 30 : resultRelInfo->ri_BatchSize);
868 30 : resultRelInfo->ri_PlanSlots = palloc(sizeof(TupleTableSlot *) *
869 30 : resultRelInfo->ri_BatchSize);
870 : }
871 :
872 : /*
873 : * Initialize the batch slots. We don't know how many slots will
874 : * be needed, so we initialize them as the batch grows, and we
875 : * keep them across batches. To mitigate an inefficiency in how
876 : * resource owner handles objects with many references (as with
877 : * many slots all referencing the same tuple descriptor) we copy
878 : * the appropriate tuple descriptor for each slot.
879 : */
880 140288 : if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
881 : {
882 131212 : TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
883 : TupleDesc plan_tdesc =
884 131212 : CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
885 :
886 262424 : resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
887 131212 : MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
888 :
889 262424 : resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
890 131212 : MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
891 :
892 : /* remember how many batch slots we initialized */
893 131212 : resultRelInfo->ri_NumSlotsInitialized++;
894 : }
895 :
896 140288 : ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
897 : slot);
898 :
899 140288 : ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
900 : planSlot);
901 :
902 : /*
903 : * If these are the first tuples stored in the buffers, add the
904 : * target rel and the mtstate to the
905 : * es_insert_pending_result_relations and
906 : * es_insert_pending_modifytables lists respectively, except in
907 : * the case where flushing was done above, in which case they
908 : * would already have been added to the lists, so no need to do
909 : * this.
910 : */
911 140288 : if (resultRelInfo->ri_NumSlots == 0 && !flushed)
912 : {
913 : Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
914 : resultRelInfo));
915 38 : estate->es_insert_pending_result_relations =
916 38 : lappend(estate->es_insert_pending_result_relations,
917 : resultRelInfo);
918 38 : estate->es_insert_pending_modifytables =
919 38 : lappend(estate->es_insert_pending_modifytables, mtstate);
920 : }
921 : Assert(list_member_ptr(estate->es_insert_pending_result_relations,
922 : resultRelInfo));
923 :
924 140288 : resultRelInfo->ri_NumSlots++;
925 :
926 140288 : MemoryContextSwitchTo(oldContext);
927 :
928 140288 : return NULL;
929 : }
930 :
931 : /*
932 : * insert into foreign table: let the FDW do it
933 : */
934 1726 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
935 : resultRelInfo,
936 : slot,
937 : planSlot);
938 :
939 1720 : if (slot == NULL) /* "do nothing" */
940 4 : return NULL;
941 :
942 : /*
943 : * AFTER ROW Triggers or RETURNING expressions might reference the
944 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
945 : * them. (This covers the case where the FDW replaced the slot.)
946 : */
947 1716 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
948 : }
949 : else
950 : {
951 : WCOKind wco_kind;
952 :
953 : /*
954 : * Constraints and GENERATED expressions might reference the tableoid
955 : * column, so (re-)initialize tts_tableOid before evaluating them.
956 : */
957 11902328 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
958 :
959 : /*
960 : * Compute stored generated columns
961 : */
962 11902328 : if (resultRelationDesc->rd_att->constr &&
963 3543080 : resultRelationDesc->rd_att->constr->has_generated_stored)
964 886 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
965 : CMD_INSERT);
966 :
967 : /*
968 : * Check any RLS WITH CHECK policies.
969 : *
970 : * Normally we should check INSERT policies. But if the insert is the
971 : * result of a partition key update that moved the tuple to a new
972 : * partition, we should instead check UPDATE policies, because we are
973 : * executing policies defined on the target table, and not those
974 : * defined on the child partitions.
975 : *
976 : * If we're running MERGE, we refer to the action that we're executing
977 : * to know if we're doing an INSERT or UPDATE to a partition table.
978 : */
979 11902316 : if (mtstate->operation == CMD_UPDATE)
980 680 : wco_kind = WCO_RLS_UPDATE_CHECK;
981 11901636 : else if (mtstate->operation == CMD_MERGE)
982 1498 : wco_kind = (context->relaction->mas_action->commandType == CMD_UPDATE) ?
983 1498 : WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
984 : else
985 11900138 : wco_kind = WCO_RLS_INSERT_CHECK;
986 :
987 : /*
988 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
989 : * we are looking for at this point.
990 : */
991 11902316 : if (resultRelInfo->ri_WithCheckOptions != NIL)
992 522 : ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
993 :
994 : /*
995 : * Check the constraints of the tuple.
996 : */
997 11902148 : if (resultRelationDesc->rd_att->constr)
998 3542990 : ExecConstraints(resultRelInfo, slot, estate);
999 :
1000 : /*
1001 : * Also check the tuple against the partition constraint, if there is
1002 : * one; except that if we got here via tuple-routing, we don't need to
1003 : * if there's no BR trigger defined on the partition.
1004 : */
1005 11901574 : if (resultRelationDesc->rd_rel->relispartition &&
1006 725078 : (resultRelInfo->ri_RootResultRelInfo == NULL ||
1007 720332 : (resultRelInfo->ri_TrigDesc &&
1008 1268 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
1009 4930 : ExecPartitionCheck(resultRelInfo, slot, estate, true);
1010 :
1011 11901406 : if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
1012 4010 : {
1013 : /* Perform a speculative insertion. */
1014 : uint32 specToken;
1015 : ItemPointerData conflictTid;
1016 : bool specConflict;
1017 : List *arbiterIndexes;
1018 :
1019 9380 : arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
1020 :
1021 : /*
1022 : * Do a non-conclusive check for conflicts first.
1023 : *
1024 : * We're not holding any locks yet, so this doesn't guarantee that
1025 : * the later insert won't conflict. But it avoids leaving behind
1026 : * a lot of canceled speculative insertions, if you run a lot of
1027 : * INSERT ON CONFLICT statements that do conflict.
1028 : *
1029 : * We loop back here if we find a conflict below, either during
1030 : * the pre-check, or when we re-check after inserting the tuple
1031 : * speculatively. Better allow interrupts in case some bug makes
1032 : * this an infinite loop.
1033 : */
1034 9390 : vlock:
1035 9390 : CHECK_FOR_INTERRUPTS();
1036 9390 : specConflict = false;
1037 9390 : if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
1038 : &conflictTid, arbiterIndexes))
1039 : {
1040 : /* committed conflict tuple found */
1041 5358 : if (onconflict == ONCONFLICT_UPDATE)
1042 : {
1043 : /*
1044 : * In case of ON CONFLICT DO UPDATE, execute the UPDATE
1045 : * part. Be prepared to retry if the UPDATE fails because
1046 : * of another concurrent UPDATE/DELETE to the conflict
1047 : * tuple.
1048 : */
1049 5200 : TupleTableSlot *returning = NULL;
1050 :
1051 5200 : if (ExecOnConflictUpdate(context, resultRelInfo,
1052 : &conflictTid, slot, canSetTag,
1053 : &returning))
1054 : {
1055 5122 : InstrCountTuples2(&mtstate->ps, 1);
1056 5122 : return returning;
1057 : }
1058 : else
1059 0 : goto vlock;
1060 : }
1061 : else
1062 : {
1063 : /*
1064 : * In case of ON CONFLICT DO NOTHING, do nothing. However,
1065 : * verify that the tuple is visible to the executor's MVCC
1066 : * snapshot at higher isolation levels.
1067 : *
1068 : * Using ExecGetReturningSlot() to store the tuple for the
1069 : * recheck isn't that pretty, but we can't trivially use
1070 : * the input slot, because it might not be of a compatible
1071 : * type. As there's no conflicting usage of
1072 : * ExecGetReturningSlot() in the DO NOTHING case...
1073 : */
1074 : Assert(onconflict == ONCONFLICT_NOTHING);
1075 158 : ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
1076 : ExecGetReturningSlot(estate, resultRelInfo));
1077 138 : InstrCountTuples2(&mtstate->ps, 1);
1078 138 : return NULL;
1079 : }
1080 : }
1081 :
1082 : /*
1083 : * Before we start insertion proper, acquire our "speculative
1084 : * insertion lock". Others can use that to wait for us to decide
1085 : * if we're going to go ahead with the insertion, instead of
1086 : * waiting for the whole transaction to complete.
1087 : */
1088 4026 : specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
1089 :
1090 : /* insert the tuple, with the speculative token */
1091 4026 : table_tuple_insert_speculative(resultRelationDesc, slot,
1092 : estate->es_output_cid,
1093 : 0,
1094 : NULL,
1095 : specToken);
1096 :
1097 : /* insert index entries for tuple */
1098 4026 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1099 : slot, estate, false, true,
1100 : &specConflict,
1101 : arbiterIndexes,
1102 : false);
1103 :
1104 : /* adjust the tuple's state accordingly */
1105 4020 : table_tuple_complete_speculative(resultRelationDesc, slot,
1106 4020 : specToken, !specConflict);
1107 :
1108 : /*
1109 : * Wake up anyone waiting for our decision. They will re-check
1110 : * the tuple, see that it's no longer speculative, and wait on our
1111 : * XID as if this was a regularly inserted tuple all along. Or if
1112 : * we killed the tuple, they will see it's dead, and proceed as if
1113 : * the tuple never existed.
1114 : */
1115 4020 : SpeculativeInsertionLockRelease(GetCurrentTransactionId());
1116 :
1117 : /*
1118 : * If there was a conflict, start from the beginning. We'll do
1119 : * the pre-check again, which will now find the conflicting tuple
1120 : * (unless it aborts before we get there).
1121 : */
1122 4020 : if (specConflict)
1123 : {
1124 10 : list_free(recheckIndexes);
1125 10 : goto vlock;
1126 : }
1127 :
1128 : /* Since there was no insertion conflict, we're done */
1129 : }
1130 : else
1131 : {
1132 : /* insert the tuple normally */
1133 11892026 : table_tuple_insert(resultRelationDesc, slot,
1134 : estate->es_output_cid,
1135 : 0, NULL);
1136 :
1137 : /* insert index entries for tuple */
1138 11892002 : if (resultRelInfo->ri_NumIndices > 0)
1139 3237252 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1140 : slot, estate, false,
1141 : false, NULL, NIL,
1142 : false);
1143 : }
1144 : }
1145 :
1146 11897340 : if (canSetTag)
1147 11896130 : (estate->es_processed)++;
1148 :
1149 : /*
1150 : * If this insert is the result of a partition key update that moved the
1151 : * tuple to a new partition, put this row into the transition NEW TABLE,
1152 : * if there is one. We need to do this separately for DELETE and INSERT
1153 : * because they happen on different tables.
1154 : */
1155 11897340 : ar_insert_trig_tcs = mtstate->mt_transition_capture;
1156 11897340 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
1157 42 : && mtstate->mt_transition_capture->tcs_update_new_table)
1158 : {
1159 42 : ExecARUpdateTriggers(estate, resultRelInfo,
1160 : NULL, NULL,
1161 : NULL,
1162 : NULL,
1163 : slot,
1164 : NULL,
1165 42 : mtstate->mt_transition_capture,
1166 : false);
1167 :
1168 : /*
1169 : * We've already captured the NEW TABLE row, so make sure any AR
1170 : * INSERT trigger fired below doesn't capture it again.
1171 : */
1172 42 : ar_insert_trig_tcs = NULL;
1173 : }
1174 :
1175 : /* AFTER ROW INSERT Triggers */
1176 11897340 : ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
1177 : ar_insert_trig_tcs);
1178 :
1179 11897340 : list_free(recheckIndexes);
1180 :
1181 : /*
1182 : * Check any WITH CHECK OPTION constraints from parent views. We are
1183 : * required to do this after testing all constraints and uniqueness
1184 : * violations per the SQL spec, so we do it after actually inserting the
1185 : * record into the heap and all indexes.
1186 : *
1187 : * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
1188 : * tuple will never be seen if it violates the WITH CHECK OPTION.
1189 : *
1190 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1191 : * are looking for at this point.
1192 : */
1193 11897340 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1194 322 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1195 :
1196 : /* Process RETURNING if present */
1197 11897206 : if (resultRelInfo->ri_projectReturning)
1198 3324 : result = ExecProcessReturning(resultRelInfo, slot, planSlot);
1199 :
1200 11897194 : if (inserted_tuple)
1201 676 : *inserted_tuple = slot;
1202 11897194 : if (insert_destrel)
1203 676 : *insert_destrel = resultRelInfo;
1204 :
1205 11897194 : return result;
1206 : }
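
Distilling the ON CONFLICT arm above, a sketch of the speculative-insertion protocol with the pre-check, conflict branches, and error handling elided (every call appears verbatim in the function body):

    uint32      specToken;
    bool        specConflict = false;

    specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
    table_tuple_insert_speculative(resultRelationDesc, slot,
                                   estate->es_output_cid, 0, NULL, specToken);
    recheckIndexes = ExecInsertIndexTuples(resultRelInfo, slot, estate,
                                           false, true, &specConflict,
                                           arbiterIndexes, false);
    table_tuple_complete_speculative(resultRelationDesc, slot,
                                     specToken, !specConflict);
    SpeculativeInsertionLockRelease(GetCurrentTransactionId());

    if (specConflict)
    {
        /* another backend won the race: loop back to the pre-check (vlock) */
    }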
1207 :
1208 : /* ----------------------------------------------------------------
1209 : * ExecBatchInsert
1210 : *
1211 : * Insert multiple tuples in an efficient way.
1212 : * Currently, this handles inserting into a foreign table without
1213 : * RETURNING clause.
1214 : * ----------------------------------------------------------------
1215 : */
1216 : static void
1217 60 : ExecBatchInsert(ModifyTableState *mtstate,
1218 : ResultRelInfo *resultRelInfo,
1219 : TupleTableSlot **slots,
1220 : TupleTableSlot **planSlots,
1221 : int numSlots,
1222 : EState *estate,
1223 : bool canSetTag)
1224 : {
1225 : int i;
1226 60 : int numInserted = numSlots;
1227 60 : TupleTableSlot *slot = NULL;
1228 : TupleTableSlot **rslots;
1229 :
1230 : /*
1231 : * insert into foreign table: let the FDW do it
1232 : */
1233 60 : rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1234 : resultRelInfo,
1235 : slots,
1236 : planSlots,
1237 : &numInserted);
1238 :
1239 140348 : for (i = 0; i < numInserted; i++)
1240 : {
1241 140288 : slot = rslots[i];
1242 :
1243 : /*
1244 : * AFTER ROW Triggers might reference the tableoid column, so
1245 : * (re-)initialize tts_tableOid before evaluating them.
1246 : */
1247 140288 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1248 :
1249 : /* AFTER ROW INSERT Triggers */
1250 140288 : ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1251 140288 : mtstate->mt_transition_capture);
1252 :
1253 : /*
1254 : * Check any WITH CHECK OPTION constraints from parent views. See the
1255 : * comment in ExecInsert.
1256 : */
1257 140288 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1258 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1259 : }
1260 :
1261 60 : if (canSetTag && numInserted > 0)
1262 60 : estate->es_processed += numInserted;
1263 :
1264 : /* Clean up all the slots, ready for the next batch */
1265 140348 : for (i = 0; i < numSlots; i++)
1266 : {
1267 140288 : ExecClearTuple(slots[i]);
1268 140288 : ExecClearTuple(planSlots[i]);
1269 : }
1270 60 : resultRelInfo->ri_NumSlots = 0;
1271 60 : }
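
For orientation, a hedged sketch of the FDW callback shape that ExecBatchInsert() invokes above; the function name is hypothetical, while the signature follows FdwRoutine.ExecForeignBatchInsert:

    static TupleTableSlot **
    my_exec_foreign_batch_insert(EState *estate,
                                 ResultRelInfo *rinfo,
                                 TupleTableSlot **slots,
                                 TupleTableSlot **planSlots,
                                 int *numSlots)
    {
        /*
         * Ship *numSlots rows to the remote side in one round trip.  On
         * return, *numSlots is the number actually inserted, and the result
         * array holds the inserted tuples (this stub just echoes the input).
         */
        return slots;
    }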
1272 :
1273 : /*
1274 : * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1275 : */
1276 : static void
1277 36 : ExecPendingInserts(EState *estate)
1278 : {
1279 : ListCell *l1,
1280 : *l2;
1281 :
1282 74 : forboth(l1, estate->es_insert_pending_result_relations,
1283 : l2, estate->es_insert_pending_modifytables)
1284 : {
1285 38 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1286 38 : ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1287 :
1288 : Assert(mtstate);
1289 38 : ExecBatchInsert(mtstate, resultRelInfo,
1290 : resultRelInfo->ri_Slots,
1291 : resultRelInfo->ri_PlanSlots,
1292 : resultRelInfo->ri_NumSlots,
1293 38 : estate, mtstate->canSetTag);
1294 : }
1295 :
1296 36 : list_free(estate->es_insert_pending_result_relations);
1297 36 : list_free(estate->es_insert_pending_modifytables);
1298 36 : estate->es_insert_pending_result_relations = NIL;
1299 36 : estate->es_insert_pending_modifytables = NIL;
1300 36 : }
1301 :
1302 : /*
1303 : * ExecDeletePrologue -- subroutine for ExecDelete
1304 : *
1305 : * Prepare executor state for DELETE. Actually, the only thing we have to do
1306 : * here is execute BEFORE ROW triggers. We return false if one of them makes
1307 : * the delete a no-op; otherwise, return true.
1308 : */
1309 : static bool
1310 1687410 : ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1311 : ItemPointer tupleid, HeapTuple oldtuple,
1312 : TupleTableSlot **epqreturnslot, TM_Result *result)
1313 : {
1314 1687410 : if (result)
1315 262 : *result = TM_Ok;
1316 :
1317 : /* BEFORE ROW DELETE triggers */
1318 1687410 : if (resultRelInfo->ri_TrigDesc &&
1319 6784 : resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1320 : {
1321 : /* Flush any pending inserts, so rows are visible to the triggers */
1322 340 : if (context->estate->es_insert_pending_result_relations != NIL)
1323 2 : ExecPendingInserts(context->estate);
1324 :
1325 340 : return ExecBRDeleteTriggers(context->estate, context->epqstate,
1326 : resultRelInfo, tupleid, oldtuple,
1327 : epqreturnslot, result, &context->tmfd);
1328 : }
1329 :
1330 1687070 : return true;
1331 : }
1332 :
1333 : /*
1334 : * ExecDeleteAct -- subroutine for ExecDelete
1335 : *
1336 : * Actually delete the tuple from a plain table.
1337 : *
1338 : * Caller is in charge of doing EvalPlanQual as necessary
1339 : */
1340 : static TM_Result
1341 1687252 : ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1342 : ItemPointer tupleid, bool changingPart)
1343 : {
1344 1687252 : EState *estate = context->estate;
1345 :
1346 1687252 : return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1347 : estate->es_output_cid,
1348 : estate->es_snapshot,
1349 : estate->es_crosscheck_snapshot,
1350 : true /* wait for commit */ ,
1351 : &context->tmfd,
1352 : changingPart);
1353 : }
1354 :
1355 : /*
1356 : * ExecDeleteEpilogue -- subroutine for ExecDelete
1357 : *
1358 : * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1359 : * including the UPDATE triggers if the deletion is being done as part of a
1360 : * cross-partition tuple move.
1361 : */
1362 : static void
1363 1687196 : ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1364 : ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1365 : {
1366 1687196 : ModifyTableState *mtstate = context->mtstate;
1367 1687196 : EState *estate = context->estate;
1368 : TransitionCaptureState *ar_delete_trig_tcs;
1369 :
1370 : /*
1371 : * If this delete is the result of a partition key update that moved the
1372 : * tuple to a new partition, put this row into the transition OLD TABLE,
1373 : * if there is one. We need to do this separately for DELETE and INSERT
1374 : * because they happen on different tables.
1375 : */
1376 1687196 : ar_delete_trig_tcs = mtstate->mt_transition_capture;
1377 1687196 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1378 42 : mtstate->mt_transition_capture->tcs_update_old_table)
1379 : {
1380 42 : ExecARUpdateTriggers(estate, resultRelInfo,
1381 : NULL, NULL,
1382 : tupleid, oldtuple,
1383 42 : NULL, NULL, mtstate->mt_transition_capture,
1384 : false);
1385 :
1386 : /*
1387 : * We've already captured the OLD TABLE row, so make sure any AR
1388 : * DELETE trigger fired below doesn't capture it again.
1389 : */
1390 42 : ar_delete_trig_tcs = NULL;
1391 : }
1392 :
1393 : /* AFTER ROW DELETE Triggers */
1394 1687196 : ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1395 : ar_delete_trig_tcs, changingPart);
1396 1687196 : }
1397 :
1398 : /* ----------------------------------------------------------------
1399 : * ExecDelete
1400 : *
1401 : * DELETE is like UPDATE, except that we delete the tuple and no
1402 : * index modifications are needed.
1403 : *
1404 : * When deleting from a table, tupleid identifies the tuple to
1405 : * delete and oldtuple is NULL. When deleting from a view,
1406 : * oldtuple is passed to the INSTEAD OF triggers and identifies
1407 : * what to delete, and tupleid is invalid. When deleting from a
1408 : * foreign table, tupleid is invalid; the FDW has to figure out
1409 : * which row to delete using data from the planSlot. oldtuple is
1410 : * passed to foreign table triggers; it is NULL when the foreign
1411 : * table has no relevant triggers. We use tupleDeleted to indicate
1412 : * whether the tuple is actually deleted; callers can use it to
1413 : * decide whether to continue the operation. When this DELETE is
1414 : * part of a partition-key UPDATE, the slot returned by
1415 : * EvalPlanQual() is passed back using output parameter epqreturnslot.
1416 : *
1417 : * Returns RETURNING result if any, otherwise NULL.
1418 : * ----------------------------------------------------------------
1419 : */
1420 : static TupleTableSlot *
1421 1687148 : ExecDelete(ModifyTableContext *context,
1422 : ResultRelInfo *resultRelInfo,
1423 : ItemPointer tupleid,
1424 : HeapTuple oldtuple,
1425 : bool processReturning,
1426 : bool changingPart,
1427 : bool canSetTag,
1428 : bool *tupleDeleted,
1429 : TupleTableSlot **epqreturnslot)
1430 : {
1431 1687148 : EState *estate = context->estate;
1432 1687148 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1433 1687148 : TupleTableSlot *slot = NULL;
1434 : TM_Result result;
1435 :
1436 1687148 : if (tupleDeleted)
1437 862 : *tupleDeleted = false;
1438 :
1439 : /*
1440 : * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1441 : * done if it says we are.
1442 : */
1443 1687148 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1444 : epqreturnslot, NULL))
1445 28 : return NULL;
1446 :
1447 : /* INSTEAD OF ROW DELETE Triggers */
1448 1687086 : if (resultRelInfo->ri_TrigDesc &&
1449 6684 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
1450 48 : {
1451 : bool dodelete;
1452 :
1453 : Assert(oldtuple != NULL);
1454 54 : dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1455 :
1456 54 : if (!dodelete) /* "do nothing" */
1457 6 : return NULL;
1458 : }
1459 1687032 : else if (resultRelInfo->ri_FdwRoutine)
1460 : {
1461 : /*
1462 : * delete from foreign table: let the FDW do it
1463 : *
1464 : * We offer the returning slot as a place to store RETURNING data,
1465 : * although the FDW can return some other slot if it wants.
1466 : */
1467 34 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1468 34 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1469 : resultRelInfo,
1470 : slot,
1471 : context->planSlot);
1472 :
1473 34 : if (slot == NULL) /* "do nothing" */
1474 0 : return NULL;
1475 :
1476 : /*
1477 : * RETURNING expressions might reference the tableoid column, so
1478 : * (re)initialize tts_tableOid before evaluating them.
1479 : */
1480 34 : if (TTS_EMPTY(slot))
1481 6 : ExecStoreAllNullTuple(slot);
1482 :
1483 34 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1484 : }
1485 : else
1486 : {
1487 : /*
1488 : * delete the tuple
1489 : *
1490 : * Note: if context->estate->es_crosscheck_snapshot isn't
1491 : * InvalidSnapshot, we check that the row to be deleted is visible to
1492 : * that snapshot, and throw a can't-serialize error if not. This is a
1493 : * special-case behavior needed for referential integrity updates in
1494 : * transaction-snapshot mode transactions.
1495 : */
1496 1686998 : ldelete:
1497 1687002 : result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1498 :
1499 1686966 : switch (result)
1500 : {
1501 30 : case TM_SelfModified:
1502 :
1503 : /*
1504 : * The target tuple was already updated or deleted by the
1505 : * current command, or by a later command in the current
1506 : * transaction. The former case is possible in a join DELETE
1507 : * where multiple tuples join to the same target tuple. This
1508 : * is somewhat questionable, but Postgres has always allowed
1509 : * it: we just ignore additional deletion attempts.
1510 : *
1511 : * The latter case arises if the tuple is modified by a
1512 : * command in a BEFORE trigger, or perhaps by a command in a
1513 : * volatile function used in the query. In such situations we
1514 : * should not ignore the deletion, but it is equally unsafe to
1515 : * proceed. We don't want to discard the original DELETE
1516 : * while keeping the triggered actions based on its deletion;
1517 : * and it would be no better to allow the original DELETE
1518 : * while discarding updates that it triggered. The row update
1519 : * carries some information that might be important according
1520 : * to business rules; so throwing an error is the only safe
1521 : * course.
1522 : *
1523 : * If a trigger actually intends this type of interaction, it
1524 : * can re-execute the DELETE and then return NULL to cancel
1525 : * the outer delete.
1526 : */
1527 30 : if (context->tmfd.cmax != estate->es_output_cid)
1528 6 : ereport(ERROR,
1529 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1530 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1531 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1532 :
1533 : /* Else, already deleted by self; nothing to do */
1534 24 : return NULL;
1535 :
1536 1686876 : case TM_Ok:
1537 1686876 : break;
1538 :
1539 54 : case TM_Updated:
1540 : {
1541 : TupleTableSlot *inputslot;
1542 : TupleTableSlot *epqslot;
1543 :
1544 54 : if (IsolationUsesXactSnapshot())
1545 0 : ereport(ERROR,
1546 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1547 : errmsg("could not serialize access due to concurrent update")));
1548 :
1549 : /*
1550 : * Already know that we're going to need to do EPQ, so
1551 : * fetch tuple directly into the right slot.
1552 : */
1553 54 : EvalPlanQualBegin(context->epqstate);
1554 54 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1555 : resultRelInfo->ri_RangeTableIndex);
1556 :
1557 54 : result = table_tuple_lock(resultRelationDesc, tupleid,
1558 : estate->es_snapshot,
1559 : inputslot, estate->es_output_cid,
1560 : LockTupleExclusive, LockWaitBlock,
1561 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1562 : &context->tmfd);
1563 :
1564 50 : switch (result)
1565 : {
1566 44 : case TM_Ok:
1567 : Assert(context->tmfd.traversed);
1568 44 : epqslot = EvalPlanQual(context->epqstate,
1569 : resultRelationDesc,
1570 : resultRelInfo->ri_RangeTableIndex,
1571 : inputslot);
1572 44 : if (TupIsNull(epqslot))
1573 : /* Tuple not passing quals anymore, exiting... */
1574 28 : return NULL;
1575 :
1576 : /*
1577 : * If requested, skip delete and pass back the
1578 : * updated row.
1579 : */
1580 16 : if (epqreturnslot)
1581 : {
1582 12 : *epqreturnslot = epqslot;
1583 12 : return NULL;
1584 : }
1585 : else
1586 4 : goto ldelete;
1587 :
1588 4 : case TM_SelfModified:
1589 :
1590 : /*
1591 : * This can be reached when following an update
1592 : * chain from a tuple updated by another session,
1593 : * reaching a tuple that was already updated in
1594 : * this transaction. If previously updated by this
1595 : * command, ignore the delete, otherwise error
1596 : * out.
1597 : *
1598 : * See also TM_SelfModified response to
1599 : * table_tuple_delete() above.
1600 : */
1601 4 : if (context->tmfd.cmax != estate->es_output_cid)
1602 2 : ereport(ERROR,
1603 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1604 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1605 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1606 2 : return NULL;
1607 :
1608 2 : case TM_Deleted:
1609 : /* tuple already deleted; nothing to do */
1610 2 : return NULL;
1611 :
1612 0 : default:
1613 :
1614 : /*
1615 : * TM_Invisible should be impossible because we're
1616 : * waiting for updated row versions, and would
1617 : * already have errored out if the first version
1618 : * is invisible.
1619 : *
1620 : * TM_Updated should be impossible, because we're
1621 : * locking the latest version via
1622 : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
1623 : */
1624 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
1625 : result);
1626 : return NULL;
1627 : }
1628 :
1629 : Assert(false);
1630 : break;
1631 : }
1632 :
1633 6 : case TM_Deleted:
1634 6 : if (IsolationUsesXactSnapshot())
1635 0 : ereport(ERROR,
1636 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1637 : errmsg("could not serialize access due to concurrent delete")));
1638 : /* tuple already deleted; nothing to do */
1639 6 : return NULL;
1640 :
1641 0 : default:
1642 0 : elog(ERROR, "unrecognized table_tuple_delete status: %u",
1643 : result);
1644 : return NULL;
1645 : }
1646 :
1647 : /*
1648 : * Note: Normally one would think that we have to delete index tuples
1649 : * associated with the heap tuple now...
1650 : *
1651 : * ... but in POSTGRES, we have no need to do this because VACUUM will
1652 : * take care of it later. We can't delete index tuples immediately
1653 : * anyway, since the tuple is still visible to other transactions.
1654 : */
1655 : }
1656 :
1657 1686958 : if (canSetTag)
1658 1685922 : (estate->es_processed)++;
1659 :
1660 : /* Tell caller that the delete actually happened. */
1661 1686958 : if (tupleDeleted)
1662 808 : *tupleDeleted = true;
1663 :
1664 1686958 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
1665 :
1666 : /* Process RETURNING if present and if requested */
1667 1686958 : if (processReturning && resultRelInfo->ri_projectReturning)
1668 : {
1669 : /*
1670 : * We have to put the target tuple into a slot, which means first we
1671 : * have to fetch it. We can use the trigger tuple slot.
1672 : */
1673 : TupleTableSlot *rslot;
1674 :
1675 872 : if (resultRelInfo->ri_FdwRoutine)
1676 : {
1677 : /* FDW must have provided a slot containing the deleted row */
1678 : Assert(!TupIsNull(slot));
1679 : }
1680 : else
1681 : {
1682 866 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1683 866 : if (oldtuple != NULL)
1684 : {
1685 24 : ExecForceStoreHeapTuple(oldtuple, slot, false);
1686 : }
1687 : else
1688 : {
1689 842 : if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
1690 : SnapshotAny, slot))
1691 0 : elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1692 : }
1693 : }
1694 :
1695 872 : rslot = ExecProcessReturning(resultRelInfo, slot, context->planSlot);
1696 :
1697 : /*
1698 : * Before releasing the target tuple again, make sure rslot has a
1699 : * local copy of any pass-by-reference values.
1700 : */
1701 872 : ExecMaterializeSlot(rslot);
1702 :
1703 872 : ExecClearTuple(slot);
1704 :
1705 872 : return rslot;
1706 : }
1707 :
1708 1686086 : return NULL;
1709 : }
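
/*
 * Illustrative sketch, not PostgreSQL code: a compilable stand-alone model of
 * the concurrent-update handling in ExecDelete() above.  tm_delete() and the
 * TM_* values are hypothetical stand-ins for table_tuple_delete() and its
 * TM_Result codes; the "qualifies" flag plays the role of the EvalPlanQual
 * recheck.  The point is only the shape of the loop: TM_UPDATED triggers a
 * recheck of the latest row version and, if it still passes the quals, a
 * retry of the delete (the "goto ldelete" path).
 */
#include <stdbool.h>
#include <stdio.h>

typedef enum { TM_OK, TM_UPDATED, TM_DELETED } TmResult;

/* Hypothetical row: a version counter stands in for the update chain. */
typedef struct { int version; bool deleted; bool qualifies; } Row;

static TmResult tm_delete(Row *row, int seen_version)
{
	if (row->deleted)
		return TM_DELETED;
	if (row->version != seen_version)
		return TM_UPDATED;		/* someone updated the row under us */
	row->deleted = true;
	return TM_OK;
}

static bool exec_delete(Row *row, int snapshot_version)
{
	int seen = snapshot_version;

	for (;;)
	{
		switch (tm_delete(row, seen))
		{
			case TM_OK:
				return true;		/* delete happened */
			case TM_UPDATED:
				if (!row->qualifies)
					return false;	/* EPQ: row no longer passes quals */
				seen = row->version;	/* lock latest version and retry */
				break;
			case TM_DELETED:
				return false;		/* already gone; nothing to do */
		}
	}
}

int main(void)
{
	Row r = { .version = 2, .deleted = false, .qualifies = true };

	/* Our snapshot saw version 1; the model retries once, then deletes. */
	printf("deleted: %d\n", (int) exec_delete(&r, 1));
	return 0;
}
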
1710 :
1711 : /*
1712 : * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
1713 : *
1714 : * This works by first deleting the old tuple from the current partition,
1715 : * followed by inserting the new tuple into the root parent table, that is,
1716 : * mtstate->rootResultRelInfo. It will be re-routed from there to the
1717 : * correct partition.
1718 : *
1719 : * Returns true if the tuple has been successfully moved, or if it's found
1720 : * that the tuple was concurrently deleted so there's nothing more to do
1721 : * for the caller.
1722 : *
1723 : * False is returned if the tuple we're trying to move is found to have been
1724 : * concurrently updated. In that case, the caller must check if the updated
1725 : * tuple that's returned in *retry_slot still needs to be re-routed, and call
1726 : * this function again or perform a regular update accordingly. For MERGE,
1727 : * the updated tuple is not returned in *retry_slot; it has its own retry
1728 : * logic.
1729 : */
1730 : static bool
1731 904 : ExecCrossPartitionUpdate(ModifyTableContext *context,
1732 : ResultRelInfo *resultRelInfo,
1733 : ItemPointer tupleid, HeapTuple oldtuple,
1734 : TupleTableSlot *slot,
1735 : bool canSetTag,
1736 : UpdateContext *updateCxt,
1737 : TupleTableSlot **retry_slot,
1738 : TupleTableSlot **inserted_tuple,
1739 : ResultRelInfo **insert_destrel)
1740 : {
1741 904 : ModifyTableState *mtstate = context->mtstate;
1742 904 : EState *estate = mtstate->ps.state;
1743 : TupleConversionMap *tupconv_map;
1744 : bool tuple_deleted;
1745 904 : TupleTableSlot *epqslot = NULL;
1746 :
1747 904 : context->cpUpdateReturningSlot = NULL;
1748 904 : *retry_slot = NULL;
1749 :
1750 : /*
1751 : * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
1752 : * to migrate to a different partition. Maybe this can be implemented
1753 : * some day, but it seems a fringe feature with little redeeming value.
1754 : */
1755 904 : if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
1756 0 : ereport(ERROR,
1757 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1758 : errmsg("invalid ON UPDATE specification"),
1759 : errdetail("The result tuple would appear in a different partition than the original tuple.")));
1760 :
1761 : /*
1762 : * When an UPDATE is run directly on a leaf partition, simply fail with a
1763 : * partition constraint violation error.
1764 : */
1765 904 : if (resultRelInfo == mtstate->rootResultRelInfo)
1766 42 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1767 :
1768 : /* Initialize tuple routing info if not already done. */
1769 862 : if (mtstate->mt_partition_tuple_routing == NULL)
1770 : {
1771 572 : Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
1772 : MemoryContext oldcxt;
1773 :
1774 : /* Things built here have to last for the query duration. */
1775 572 : oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1776 :
1777 572 : mtstate->mt_partition_tuple_routing =
1778 572 : ExecSetupPartitionTupleRouting(estate, rootRel);
1779 :
1780 : /*
1781 : * Before a partition's tuple can be re-routed, it must first be
1782 : * converted to the root's format, so we'll need a slot for storing
1783 : * such tuples.
1784 : */
1785 : Assert(mtstate->mt_root_tuple_slot == NULL);
1786 572 : mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
1787 :
1788 572 : MemoryContextSwitchTo(oldcxt);
1789 : }
1790 :
1791 : /*
1792 : * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
1793 : * We want to return rows from INSERT.
1794 : */
1795 862 : ExecDelete(context, resultRelInfo,
1796 : tupleid, oldtuple,
1797 : false, /* processReturning */
1798 : true, /* changingPart */
1799 : false, /* canSetTag */
1800 : &tuple_deleted, &epqslot);
1801 :
1802 : /*
1803 : * If for some reason the DELETE didn't happen (e.g. a trigger prevented it, or
1804 : * it was already deleted by self, or it was concurrently deleted by
1805 : * another transaction), then we should skip the insert as well;
1806 : * otherwise, an UPDATE could cause an increase in the total number of
1807 : * rows across all partitions, which is clearly wrong.
1808 : *
1809 : * For a normal UPDATE, the case where the tuple has been the subject of a
1810 : * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
1811 : * machinery, but for an UPDATE that we've translated into a DELETE from
1812 : * this partition and an INSERT into some other partition, that's not
1813 : * available, because CTID chains can't span relation boundaries. We
1814 : * mimic the semantics to a limited extent by skipping the INSERT if the
1815 : * DELETE fails to find a tuple. This ensures that two concurrent
1816 : * attempts to UPDATE the same tuple at the same time can't turn one tuple
1817 : * into two, and that an UPDATE of a just-deleted tuple can't resurrect
1818 : * it.
1819 : */
1820 860 : if (!tuple_deleted)
1821 : {
1822 : /*
1823 : * epqslot will typically be NULL. But when ExecDelete() finds that
1824 : * another transaction has concurrently updated the same row, it
1825 : * re-fetches the row, skips the delete, and epqslot is set to the
1826 : * re-fetched tuple slot. In that case, we need to do all the checks
1827 : * again. For MERGE, we leave everything to the caller (it must do
1828 : * additional rechecking, and might end up executing a different
1829 : * action entirely).
1830 : */
1831 52 : if (context->relaction != NULL)
1832 10 : return false;
1833 42 : else if (TupIsNull(epqslot))
1834 36 : return true;
1835 : else
1836 : {
1837 : /* Fetch the most recent version of old tuple. */
1838 : TupleTableSlot *oldSlot;
1839 :
1840 : /* ... but first, make sure ri_oldTupleSlot is initialized. */
1841 6 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
1842 0 : ExecInitUpdateProjection(mtstate, resultRelInfo);
1843 6 : oldSlot = resultRelInfo->ri_oldTupleSlot;
1844 6 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
1845 : tupleid,
1846 : SnapshotAny,
1847 : oldSlot))
1848 0 : elog(ERROR, "failed to fetch tuple being updated");
1849 : /* and project the new tuple to retry the UPDATE with */
1850 6 : *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
1851 : oldSlot);
1852 6 : return false;
1853 : }
1854 : }
1855 :
1856 : /*
1857 : * resultRelInfo is one of the per-relation resultRelInfos. So we should
1858 : * convert the tuple into the root's tuple descriptor if needed, since
1859 : * ExecInsert() starts the search from the root.
1860 : */
1861 808 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1862 808 : if (tupconv_map != NULL)
1863 284 : slot = execute_attr_map_slot(tupconv_map->attrMap,
1864 : slot,
1865 : mtstate->mt_root_tuple_slot);
1866 :
1867 : /* Tuple routing starts from the root table. */
1868 680 : context->cpUpdateReturningSlot =
1869 808 : ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
1870 : inserted_tuple, insert_destrel);
1871 :
1872 : /*
1873 : * Reset the transition state that may possibly have been written by
1874 : * INSERT.
1875 : */
1876 680 : if (mtstate->mt_transition_capture)
1877 42 : mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
1878 :
1879 : /* We're done moving. */
1880 680 : return true;
1881 : }
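
/*
 * Illustrative sketch, not PostgreSQL code: a minimal model of the row
 * movement protocol above, under the assumption that a partition is just a
 * row counter.  It demonstrates the invariant the function comments stress:
 * if the DELETE found nothing (concurrently deleted or suppressed), the
 * INSERT must be skipped too, so an UPDATE can never change the total row
 * count across partitions.  All names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct { int nrows; } Partition;

static bool part_delete(Partition *p)
{
	if (p->nrows == 0)
		return false;			/* nothing there to move */
	p->nrows--;
	return true;
}

/* Move one row from src to dst; returns true only if the move happened. */
static bool move_row(Partition *src, Partition *dst)
{
	if (!part_delete(src))
		return false;			/* delete didn't happen: skip the insert */
	dst->nrows++;
	return true;
}

int main(void)
{
	Partition a = { .nrows = 1 };
	Partition b = { .nrows = 0 };

	move_row(&a, &b);			/* moves the row */
	move_row(&a, &b);			/* delete finds nothing; insert skipped */
	printf("total rows: %d\n", a.nrows + b.nrows);	/* still 1 */
	return 0;
}
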
1882 :
1883 : /*
1884 : * ExecUpdatePrologue -- subroutine for ExecUpdate
1885 : *
1886 : * Prepare executor state for UPDATE. This includes running BEFORE ROW
1887 : * triggers. We return false if one of them makes the update a no-op;
1888 : * otherwise, return true.
1889 : */
1890 : static bool
1891 373256 : ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1892 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
1893 : TM_Result *result)
1894 : {
1895 373256 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1896 :
1897 373256 : if (result)
1898 1452 : *result = TM_Ok;
1899 :
1900 373256 : ExecMaterializeSlot(slot);
1901 :
1902 : /*
1903 : * Open the table's indexes, if we have not done so already, so that we
1904 : * can add new index entries for the updated tuple.
1905 : */
1906 373256 : if (resultRelationDesc->rd_rel->relhasindex &&
1907 278110 : resultRelInfo->ri_IndexRelationDescs == NULL)
1908 9882 : ExecOpenIndices(resultRelInfo, false);
1909 :
1910 : /* BEFORE ROW UPDATE triggers */
1911 373256 : if (resultRelInfo->ri_TrigDesc &&
1912 5688 : resultRelInfo->ri_TrigDesc->trig_update_before_row)
1913 : {
1914 : /* Flush any pending inserts, so rows are visible to the triggers */
1915 2530 : if (context->estate->es_insert_pending_result_relations != NIL)
1916 2 : ExecPendingInserts(context->estate);
1917 :
1918 2530 : return ExecBRUpdateTriggers(context->estate, context->epqstate,
1919 : resultRelInfo, tupleid, oldtuple, slot,
1920 : result, &context->tmfd);
1921 : }
1922 :
1923 370726 : return true;
1924 : }
1925 :
1926 : /*
1927 : * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
1928 : *
1929 : * Apply the final modifications to the tuple slot before the update.
1930 : * (This is split out because we also need it in the foreign-table code path.)
1931 : */
1932 : static void
1933 373052 : ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
1934 : TupleTableSlot *slot,
1935 : EState *estate)
1936 : {
1937 373052 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1938 :
1939 : /*
1940 : * Constraints and GENERATED expressions might reference the tableoid
1941 : * column, so (re-)initialize tts_tableOid before evaluating them.
1942 : */
1943 373052 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1944 :
1945 : /*
1946 : * Compute stored generated columns
1947 : */
1948 373052 : if (resultRelationDesc->rd_att->constr &&
1949 251190 : resultRelationDesc->rd_att->constr->has_generated_stored)
1950 258 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
1951 : CMD_UPDATE);
1952 373052 : }
1953 :
1954 : /*
1955 : * ExecUpdateAct -- subroutine for ExecUpdate
1956 : *
1957 : * Actually update the tuple, when operating on a plain table. If the
1958 : * table is a partition, and the command was called referencing an ancestor
1959 : * partitioned table, this routine migrates the resulting tuple to another
1960 : * partition.
1961 : *
1962 : * The caller is in charge of keeping indexes current as necessary. The
1963 : * caller is also in charge of doing EvalPlanQual if the tuple is found to
1964 : * be concurrently updated. However, in case of a cross-partition update,
1965 : * this routine does it.
1966 : */
1967 : static TM_Result
1968 372904 : ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1969 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
1970 : bool canSetTag, UpdateContext *updateCxt)
1971 : {
1972 372904 : EState *estate = context->estate;
1973 372904 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1974 : bool partition_constraint_failed;
1975 : TM_Result result;
1976 :
1977 372904 : updateCxt->crossPartUpdate = false;
1978 :
1979 : /*
1980 : * If we move the tuple to a new partition, we loop back here to recompute
1981 : * GENERATED values (which are allowed to be different across partitions)
1982 : * and recheck any RLS policies and constraints. We do not fire any
1983 : * BEFORE triggers of the new partition, however.
1984 : */
1985 372910 : lreplace:
1986 : /* Fill in GENERATED columns */
1987 372910 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
1988 :
1989 : /* ensure slot is independent, consider e.g. EPQ */
1990 372910 : ExecMaterializeSlot(slot);
1991 :
1992 : /*
1993 : * If partition constraint fails, this row might get moved to another
1994 : * partition, in which case we should check the RLS CHECK policy just
1995 : * before inserting into the new partition, rather than doing it here.
1996 : * This is because a trigger on that partition might again change the row.
1997 : * So skip the WCO checks if the partition constraint fails.
1998 : */
1999 372910 : partition_constraint_failed =
2000 375172 : resultRelationDesc->rd_rel->relispartition &&
2001 2262 : !ExecPartitionCheck(resultRelInfo, slot, estate, false);
2002 :
2003 : /* Check any RLS UPDATE WITH CHECK policies */
2004 372910 : if (!partition_constraint_failed &&
2005 372006 : resultRelInfo->ri_WithCheckOptions != NIL)
2006 : {
2007 : /*
2008 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
2009 : * we are looking for at this point.
2010 : */
2011 450 : ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
2012 : resultRelInfo, slot, estate);
2013 : }
2014 :
2015 : /*
2016 : * If a partition check failed, try to move the row into the right
2017 : * partition.
2018 : */
2019 372862 : if (partition_constraint_failed)
2020 : {
2021 : TupleTableSlot *inserted_tuple,
2022 : *retry_slot;
2023 904 : ResultRelInfo *insert_destrel = NULL;
2024 :
2025 : /*
2026 : * ExecCrossPartitionUpdate will first DELETE the row from the
2027 : * partition it's currently in and then insert it back into the root
2028 : * table, which will re-route it to the correct partition. However,
2029 : * if the tuple has been concurrently updated, a retry is needed.
2030 : */
2031 904 : if (ExecCrossPartitionUpdate(context, resultRelInfo,
2032 : tupleid, oldtuple, slot,
2033 : canSetTag, updateCxt,
2034 : &retry_slot,
2035 : &inserted_tuple,
2036 : &insert_destrel))
2037 : {
2038 : /* success! */
2039 716 : updateCxt->updated = true;
2040 716 : updateCxt->crossPartUpdate = true;
2041 :
2042 : /*
2043 : * If the partitioned table being updated is referenced in foreign
2044 : * keys, queue up trigger events to check that none of them were
2045 : * violated. No special treatment is needed in
2046 : * non-cross-partition update situations, because the leaf
2047 : * partition's AR update triggers will take care of that. During
2048 : * cross-partition updates implemented as delete on the source
2049 : * partition followed by insert on the destination partition,
2050 : * AR-UPDATE triggers of the root table (that is, the table
2051 : * mentioned in the query) must be fired.
2052 : *
2053 : * NULL insert_destrel means that the move failed to occur, that
2054 : * is, the update failed, so there is no need to do anything in that case.
2055 : */
2056 716 : if (insert_destrel &&
2057 676 : resultRelInfo->ri_TrigDesc &&
2058 302 : resultRelInfo->ri_TrigDesc->trig_update_after_row)
2059 240 : ExecCrossPartitionUpdateForeignKey(context,
2060 : resultRelInfo,
2061 : insert_destrel,
2062 : tupleid, slot,
2063 : inserted_tuple);
2064 :
2065 720 : return TM_Ok;
2066 : }
2067 :
2068 : /*
2069 : * No luck, a retry is needed. If running MERGE, we do not do so
2070 : * here; instead we let it handle that under its own rules.
2071 : */
2072 16 : if (context->relaction != NULL)
2073 10 : return TM_Updated;
2074 :
2075 : /*
2076 : * ExecCrossPartitionUpdate installed an updated version of the new
2077 : * tuple in the retry slot; start over.
2078 : */
2079 6 : slot = retry_slot;
2080 6 : goto lreplace;
2081 : }
2082 :
2083 : /*
2084 : * Check the constraints of the tuple. We've already checked the
2085 : * partition constraint above; however, we must still ensure the tuple
2086 : * passes all other constraints, so we will call ExecConstraints() and
2087 : * have it validate all remaining checks.
2088 : */
2089 371958 : if (resultRelationDesc->rd_att->constr)
2090 250672 : ExecConstraints(resultRelInfo, slot, estate);
2091 :
2092 : /*
2093 : * replace the heap tuple
2094 : *
2095 : * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
2096 : * the row to be updated is visible to that snapshot, and throw a
2097 : * can't-serialize error if not. This is a special-case behavior needed
2098 : * for referential integrity updates in transaction-snapshot mode
2099 : * transactions.
2100 : */
2101 371908 : result = table_tuple_update(resultRelationDesc, tupleid, slot,
2102 : estate->es_output_cid,
2103 : estate->es_snapshot,
2104 : estate->es_crosscheck_snapshot,
2105 : true /* wait for commit */ ,
2106 : &context->tmfd, &updateCxt->lockmode,
2107 : &updateCxt->updateIndexes);
2108 371884 : if (result == TM_Ok)
2109 371622 : updateCxt->updated = true;
2110 :
2111 371884 : return result;
2112 : }
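
/*
 * Illustrative sketch, not PostgreSQL code: the lreplace retry loop above in
 * miniature.  try_move() is a hypothetical stand-in for
 * ExecCrossPartitionUpdate(); returning false models the case where a
 * concurrent update handed back a newer tuple version, which must go through
 * the partition-constraint check again from the top.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct { int part_key; } Tuple;

/* This "partition" accepts keys below 10. */
static bool fits_partition(const Tuple *t)
{
	return t->part_key < 10;
}

/* Hypothetical move: the first call fails with a concurrently updated
 * version of the tuple (here, a changed key); later calls succeed. */
static bool try_move(Tuple *t, int *retries_left)
{
	if (*retries_left > 0)
	{
		(*retries_left)--;
		t->part_key -= 1;		/* concurrent update changed the key */
		return false;
	}
	return true;
}

static void exec_update_act(Tuple *t)
{
	int retries = 1;

lreplace:
	/* recompute GENERATED columns and recheck constraints here */
	if (!fits_partition(t))
	{
		if (!try_move(t, &retries))
			goto lreplace;		/* got a newer version: start over */
		printf("moved to another partition\n");
		return;
	}
	printf("updated in place\n");
}

int main(void)
{
	Tuple t = { .part_key = 10 };

	exec_update_act(&t);		/* retries once; new key fits, so in-place */
	return 0;
}
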
2113 :
2114 : /*
2115 : * ExecUpdateEpilogue -- subroutine for ExecUpdate
2116 : *
2117 : * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2118 : * returns indicating that the tuple was updated.
2119 : */
2120 : static void
2121 371908 : ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
2122 : ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2123 : HeapTuple oldtuple, TupleTableSlot *slot)
2124 : {
2125 371908 : ModifyTableState *mtstate = context->mtstate;
2126 371908 : List *recheckIndexes = NIL;
2127 :
2128 : /* insert index entries for tuple if necessary */
2129 371908 : if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
2130 220872 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
2131 : slot, context->estate,
2132 : true, false,
2133 : NULL, NIL,
2134 220872 : (updateCxt->updateIndexes == TU_Summarizing));
2135 :
2136 : /* AFTER ROW UPDATE Triggers */
2137 371890 : ExecARUpdateTriggers(context->estate, resultRelInfo,
2138 : NULL, NULL,
2139 : tupleid, oldtuple, slot,
2140 : recheckIndexes,
2141 371890 : mtstate->operation == CMD_INSERT ?
2142 : mtstate->mt_oc_transition_capture :
2143 : mtstate->mt_transition_capture,
2144 : false);
2145 :
2146 371890 : list_free(recheckIndexes);
2147 :
2148 : /*
2149 : * Check any WITH CHECK OPTION constraints from parent views. We are
2150 : * required to do this after testing all constraints and uniqueness
2151 : * violations per the SQL spec, so we do it after actually updating the
2152 : * record in the heap and all indexes.
2153 : *
2154 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2155 : * are looking for at this point.
2156 : */
2157 371890 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2158 424 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2159 : slot, context->estate);
2160 371826 : }
2161 :
2162 : /*
2163 : * Queues up an update event using the target root partitioned table's
2164 : * trigger to check that a cross-partition update hasn't broken any foreign
2165 : * keys pointing into it.
2166 : */
2167 : static void
2168 240 : ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
2169 : ResultRelInfo *sourcePartInfo,
2170 : ResultRelInfo *destPartInfo,
2171 : ItemPointer tupleid,
2172 : TupleTableSlot *oldslot,
2173 : TupleTableSlot *newslot)
2174 : {
2175 : ListCell *lc;
2176 : ResultRelInfo *rootRelInfo;
2177 : List *ancestorRels;
2178 :
2179 240 : rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2180 240 : ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2181 :
2182 : /*
2183 : * For any foreign keys that point directly into non-root ancestors of
2184 : * the source partition, we can in theory fire an update event to enforce
2185 : * those constraints using their triggers, if we could tell that both the
2186 : * source and the destination partitions are under the same ancestor. But
2187 : * for now, we simply report an error that those cannot be enforced.
2188 : */
2189 534 : foreach(lc, ancestorRels)
2190 : {
2191 300 : ResultRelInfo *rInfo = lfirst(lc);
2192 300 : TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2193 300 : bool has_noncloned_fkey = false;
2194 :
2195 : /* Root ancestor's triggers will be processed. */
2196 300 : if (rInfo == rootRelInfo)
2197 234 : continue;
2198 :
2199 66 : if (trigdesc && trigdesc->trig_update_after_row)
2200 : {
2201 228 : for (int i = 0; i < trigdesc->numtriggers; i++)
2202 : {
2203 168 : Trigger *trig = &trigdesc->triggers[i];
2204 :
2205 174 : if (!trig->tgisclone &&
2206 6 : RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
2207 : {
2208 6 : has_noncloned_fkey = true;
2209 6 : break;
2210 : }
2211 : }
2212 : }
2213 :
2214 66 : if (has_noncloned_fkey)
2215 6 : ereport(ERROR,
2216 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2217 : errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2218 : errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2219 : RelationGetRelationName(rInfo->ri_RelationDesc),
2220 : RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
2221 : errhint("Consider defining the foreign key on table \"%s\".",
2222 : RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
2223 : }
2224 :
2225 : /* Perform the root table's triggers. */
2226 234 : ExecARUpdateTriggers(context->estate,
2227 : rootRelInfo, sourcePartInfo, destPartInfo,
2228 : tupleid, NULL, newslot, NIL, NULL, true);
2229 234 : }
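
/*
 * Illustrative sketch, not PostgreSQL code: the ancestor scan above in
 * miniature.  Moving a row is allowed only when every foreign key pointing
 * into the partition tree targets the root, whose triggers can re-check it;
 * a key pointing at an intermediate ancestor cannot be enforced across the
 * move and is rejected.  All names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct
{
	const char *name;
	bool		is_root;
	bool		has_direct_fkey;
} Ancestor;

/* Returns false (reject the move) if a non-root ancestor is an FK target. */
static bool move_allowed(const Ancestor *ancestors, int n)
{
	for (int i = 0; i < n; i++)
	{
		if (ancestors[i].is_root)
			continue;			/* the root's triggers handle this case */
		if (ancestors[i].has_direct_fkey)
			return false;
	}
	return true;
}

int main(void)
{
	Ancestor chain[] = {
		{ "root", true, true },	/* an FK on the root is fine */
		{ "mid", false, false },
	};

	printf("allowed: %d\n", (int) move_allowed(chain, 2));	/* 1 */
	return 0;
}
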
2230 :
2231 : /* ----------------------------------------------------------------
2232 : * ExecUpdate
2233 : *
2234 : * note: we can't run UPDATE queries with transactions
2235 : * off because UPDATEs are actually INSERTs and our
2236 : * scan will mistakenly loop forever, updating the tuple
2237 : * it just inserted. This should be fixed, but until it
2238 : * is, we don't want to get stuck in an infinite loop
2239 : * which corrupts your database.
2240 : *
2241 : * When updating a table, tupleid identifies the tuple to
2242 : * update and oldtuple is NULL. When updating a view, oldtuple
2243 : * is passed to the INSTEAD OF triggers and identifies what to
2244 : * update, and tupleid is invalid. When updating a foreign table,
2245 : * tupleid is invalid; the FDW has to figure out which row to
2246 : * update using data from the planSlot. oldtuple is passed to
2247 : * foreign table triggers; it is NULL when the foreign table has
2248 : * no relevant triggers.
2249 : *
2250 : * slot contains the new tuple value to be stored.
2251 : * planSlot is the output of the ModifyTable's subplan; we use it
2252 : * to access values from other input tables (for RETURNING),
2253 : * row-ID junk columns, etc.
2254 : *
2255 : * Returns RETURNING result if any, otherwise NULL.
2256 : * ----------------------------------------------------------------
2257 : */
2258 : static TupleTableSlot *
2259 371804 : ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2260 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2261 : bool canSetTag)
2262 : {
2263 371804 : EState *estate = context->estate;
2264 371804 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2265 371804 : UpdateContext updateCxt = {0};
2266 : TM_Result result;
2267 :
2268 : /*
2269 : * abort the operation if not running transactions
2270 : */
2271 371804 : if (IsBootstrapProcessingMode())
2272 0 : elog(ERROR, "cannot UPDATE during bootstrap");
2273 :
2274 : /*
2275 : * Prepare for the update. This includes BEFORE ROW triggers, so we're
2276 : * done if it says we are.
2277 : */
2278 371804 : if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
2279 138 : return NULL;
2280 :
2281 : /* INSTEAD OF ROW UPDATE Triggers */
2282 371630 : if (resultRelInfo->ri_TrigDesc &&
2283 5372 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2284 : {
2285 114 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2286 : oldtuple, slot))
2287 18 : return NULL; /* "do nothing" */
2288 : }
2289 371516 : else if (resultRelInfo->ri_FdwRoutine)
2290 : {
2291 : /* Fill in GENERATED columns */
2292 142 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2293 :
2294 : /*
2295 : * update in foreign table: let the FDW do it
2296 : */
2297 142 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2298 : resultRelInfo,
2299 : slot,
2300 : context->planSlot);
2301 :
2302 142 : if (slot == NULL) /* "do nothing" */
2303 2 : return NULL;
2304 :
2305 : /*
2306 : * AFTER ROW Triggers or RETURNING expressions might reference the
2307 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
2308 : * them. (This covers the case where the FDW replaced the slot.)
2309 : */
2310 140 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2311 : }
2312 : else
2313 : {
2314 : /*
2315 : * If we generate a new candidate tuple after EvalPlanQual testing, we
2316 : * must loop back here to try again. (We don't need to redo triggers,
2317 : * however. If there are any BEFORE triggers then trigger.c will have
2318 : * done table_tuple_lock to lock the correct tuple, so there's no need
2319 : * to do them again.)
2320 : */
2321 371374 : redo_act:
2322 371470 : result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2323 : canSetTag, &updateCxt);
2324 :
2325 : /*
2326 : * If ExecUpdateAct reports that a cross-partition update was done,
2327 : * then the RETURNING tuple (if any) has been projected and there's
2328 : * nothing else for us to do.
2329 : */
2330 371184 : if (updateCxt.crossPartUpdate)
2331 654 : return context->cpUpdateReturningSlot;
2332 :
2333 370530 : switch (result)
2334 : {
2335 84 : case TM_SelfModified:
2336 :
2337 : /*
2338 : * The target tuple was already updated or deleted by the
2339 : * current command, or by a later command in the current
2340 : * transaction. The former case is possible in a join UPDATE
2341 : * where multiple tuples join to the same target tuple. This
2342 : * is pretty questionable, but Postgres has always allowed it:
2343 : * we just execute the first update action and ignore
2344 : * additional update attempts.
2345 : *
2346 : * The latter case arises if the tuple is modified by a
2347 : * command in a BEFORE trigger, or perhaps by a command in a
2348 : * volatile function used in the query. In such situations we
2349 : * should not ignore the update, but it is equally unsafe to
2350 : * proceed. We don't want to discard the original UPDATE
2351 : * while keeping the triggered actions based on it; and we
2352 : * have no principled way to merge this update with the
2353 : * previous ones. So throwing an error is the only safe
2354 : * course.
2355 : *
2356 : * If a trigger actually intends this type of interaction, it
2357 : * can re-execute the UPDATE (assuming it can figure out how)
2358 : * and then return NULL to cancel the outer update.
2359 : */
2360 84 : if (context->tmfd.cmax != estate->es_output_cid)
2361 6 : ereport(ERROR,
2362 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2363 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2364 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2365 :
2366 : /* Else, already updated by self; nothing to do */
2367 78 : return NULL;
2368 :
2369 370292 : case TM_Ok:
2370 370292 : break;
2371 :
2372 146 : case TM_Updated:
2373 : {
2374 : TupleTableSlot *inputslot;
2375 : TupleTableSlot *epqslot;
2376 : TupleTableSlot *oldSlot;
2377 :
2378 146 : if (IsolationUsesXactSnapshot())
2379 2 : ereport(ERROR,
2380 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2381 : errmsg("could not serialize access due to concurrent update")));
2382 :
2383 : /*
2384 : * Already know that we're going to need to do EPQ, so
2385 : * fetch tuple directly into the right slot.
2386 : */
2387 144 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2388 : resultRelInfo->ri_RangeTableIndex);
2389 :
2390 144 : result = table_tuple_lock(resultRelationDesc, tupleid,
2391 : estate->es_snapshot,
2392 : inputslot, estate->es_output_cid,
2393 : updateCxt.lockmode, LockWaitBlock,
2394 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2395 : &context->tmfd);
2396 :
2397 140 : switch (result)
2398 : {
2399 130 : case TM_Ok:
2400 : Assert(context->tmfd.traversed);
2401 :
2402 130 : epqslot = EvalPlanQual(context->epqstate,
2403 : resultRelationDesc,
2404 : resultRelInfo->ri_RangeTableIndex,
2405 : inputslot);
2406 130 : if (TupIsNull(epqslot))
2407 : /* Tuple not passing quals anymore, exiting... */
2408 34 : return NULL;
2409 :
2410 : /* Make sure ri_oldTupleSlot is initialized. */
2411 96 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2412 0 : ExecInitUpdateProjection(context->mtstate,
2413 : resultRelInfo);
2414 :
2415 : /* Fetch the most recent version of old tuple. */
2416 96 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2417 96 : if (!table_tuple_fetch_row_version(resultRelationDesc,
2418 : tupleid,
2419 : SnapshotAny,
2420 : oldSlot))
2421 0 : elog(ERROR, "failed to fetch tuple being updated");
2422 96 : slot = ExecGetUpdateNewTuple(resultRelInfo,
2423 : epqslot, oldSlot);
2424 96 : goto redo_act;
2425 :
2426 2 : case TM_Deleted:
2427 : /* tuple already deleted; nothing to do */
2428 2 : return NULL;
2429 :
2430 8 : case TM_SelfModified:
2431 :
2432 : /*
2433 : * This can be reached when following an update
2434 : * chain from a tuple updated by another session,
2435 : * reaching a tuple that was already updated in
2436 : * this transaction. If previously modified by
2437 : * this command, ignore the redundant update,
2438 : * otherwise error out.
2439 : *
2440 : * See also TM_SelfModified response to
2441 : * table_tuple_update() above.
2442 : */
2443 8 : if (context->tmfd.cmax != estate->es_output_cid)
2444 2 : ereport(ERROR,
2445 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2446 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2447 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2448 6 : return NULL;
2449 :
2450 0 : default:
2451 : /* see table_tuple_lock call in ExecDelete() */
2452 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2453 : result);
2454 : return NULL;
2455 : }
2456 : }
2457 :
2458 : break;
2459 :
2460 8 : case TM_Deleted:
2461 8 : if (IsolationUsesXactSnapshot())
2462 0 : ereport(ERROR,
2463 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2464 : errmsg("could not serialize access due to concurrent delete")));
2465 : /* tuple already deleted; nothing to do */
2466 8 : return NULL;
2467 :
2468 0 : default:
2469 0 : elog(ERROR, "unrecognized table_tuple_update status: %u",
2470 : result);
2471 : return NULL;
2472 : }
2473 : }
2474 :
2475 370522 : if (canSetTag)
2476 369930 : (estate->es_processed)++;
2477 :
2478 370522 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2479 : slot);
2480 :
2481 : /* Process RETURNING if present */
2482 370440 : if (resultRelInfo->ri_projectReturning)
2483 2128 : return ExecProcessReturning(resultRelInfo, slot, context->planSlot);
2484 :
2485 368312 : return NULL;
2486 : }
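
/*
 * Illustrative sketch, not PostgreSQL code: the header comment of ExecUpdate
 * describes three row-identity conventions.  The hypothetical dispatcher
 * below makes them explicit: a valid tuple id means a plain table, an old
 * tuple without a tuple id means a view with INSTEAD OF triggers, and
 * neither means a foreign table whose FDW locates the row itself.
 */
#include <stdio.h>

typedef struct { int valid; } ItemPtr;
typedef struct { int unused; } OldTuple;

static const char *classify(const ItemPtr *tupleid, const OldTuple *oldtuple)
{
	if (tupleid && tupleid->valid)
		return "plain table: tupleid identifies the row to update";
	if (oldtuple)
		return "view: oldtuple feeds the INSTEAD OF trigger";
	return "foreign table: FDW finds the row from the plan slot";
}

int main(void)
{
	ItemPtr tid = { 1 };

	printf("%s\n", classify(&tid, NULL));
	printf("%s\n", classify(NULL, NULL));
	return 0;
}
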
2487 :
2488 : /*
2489 : * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
2490 : *
2491 : * Try to lock tuple for update as part of speculative insertion. If
2492 : * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
2493 : * (but still lock row, even though it may not satisfy estate's
2494 : * snapshot).
2495 : *
2496 : * Returns true if we're done (with or without an update), or false if
2497 : * the caller must retry the INSERT from scratch.
2498 : */
2499 : static bool
2500 5200 : ExecOnConflictUpdate(ModifyTableContext *context,
2501 : ResultRelInfo *resultRelInfo,
2502 : ItemPointer conflictTid,
2503 : TupleTableSlot *excludedSlot,
2504 : bool canSetTag,
2505 : TupleTableSlot **returning)
2506 : {
2507 5200 : ModifyTableState *mtstate = context->mtstate;
2508 5200 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2509 5200 : Relation relation = resultRelInfo->ri_RelationDesc;
2510 5200 : ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
2511 5200 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
2512 : TM_FailureData tmfd;
2513 : LockTupleMode lockmode;
2514 : TM_Result test;
2515 : Datum xminDatum;
2516 : TransactionId xmin;
2517 : bool isnull;
2518 :
2519 : /* Determine lock mode to use */
2520 5200 : lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
2521 :
2522 : /*
2523 : * Lock tuple for update. Don't follow updates when tuple cannot be
2524 : * locked without doing so. A row locking conflict here means our
2525 : * previous conclusion that the tuple is conclusively committed is not
2526 : * true anymore.
2527 : */
2528 5200 : test = table_tuple_lock(relation, conflictTid,
2529 5200 : context->estate->es_snapshot,
2530 5200 : existing, context->estate->es_output_cid,
2531 : lockmode, LockWaitBlock, 0,
2532 : &tmfd);
2533 5200 : switch (test)
2534 : {
2535 5176 : case TM_Ok:
2536 : /* success! */
2537 5176 : break;
2538 :
2539 24 : case TM_Invisible:
2540 :
2541 : /*
2542 : * This can occur when a just inserted tuple is updated again in
2543 : * the same command. E.g. because multiple rows with the same
2544 : * conflicting key values are inserted.
2545 : *
2546 : * This is somewhat similar to the ExecUpdate() TM_SelfModified
2547 : * case. We do not want to proceed because it would lead to the
2548 : * same row being updated a second time in some unspecified order,
2549 : * and in contrast to plain UPDATEs there's no historical behavior
2550 : * to break.
2551 : *
2552 : * It is the user's responsibility to prevent this situation from
2553 : * occurring. These problems are why the SQL standard similarly
2554 : * specifies that for SQL MERGE, an exception must be raised in
2555 : * the event of an attempt to update the same row twice.
2556 : */
2557 24 : xminDatum = slot_getsysattr(existing,
2558 : MinTransactionIdAttributeNumber,
2559 : &isnull);
2560 : Assert(!isnull);
2561 24 : xmin = DatumGetTransactionId(xminDatum);
2562 :
2563 24 : if (TransactionIdIsCurrentTransactionId(xmin))
2564 24 : ereport(ERROR,
2565 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2566 : /* translator: %s is a SQL command name */
2567 : errmsg("%s command cannot affect row a second time",
2568 : "ON CONFLICT DO UPDATE"),
2569 : errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
2570 :
2571 : /* This shouldn't happen */
2572 0 : elog(ERROR, "attempted to lock invisible tuple");
2573 : break;
2574 :
2575 0 : case TM_SelfModified:
2576 :
2577 : /*
2578 : * This state should never be reached. As a dirty snapshot is used
2579 : * to find conflicting tuples, speculative insertion wouldn't have
2580 : * seen this row to conflict with.
2581 : */
2582 0 : elog(ERROR, "unexpected self-updated tuple");
2583 : break;
2584 :
2585 0 : case TM_Updated:
2586 0 : if (IsolationUsesXactSnapshot())
2587 0 : ereport(ERROR,
2588 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2589 : errmsg("could not serialize access due to concurrent update")));
2590 :
2591 : /*
2592 : * As long as we don't support an UPDATE of INSERT ON CONFLICT for
2593 : * a partitioned table, we shouldn't reach a case where the tuple to
2594 : * be locked is moved to another partition due to a concurrent update
2595 : * of the partition key.
2596 : */
2597 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2598 :
2599 : /*
2600 : * Tell caller to try again from the very start.
2601 : *
2602 : * It does not make sense to use the usual EvalPlanQual() style
2603 : * loop here, as the new version of the row might not conflict
2604 : * anymore, or the conflicting tuple has actually been deleted.
2605 : */
2606 0 : ExecClearTuple(existing);
2607 0 : return false;
2608 :
2609 0 : case TM_Deleted:
2610 0 : if (IsolationUsesXactSnapshot())
2611 0 : ereport(ERROR,
2612 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2613 : errmsg("could not serialize access due to concurrent delete")));
2614 :
2615 : /* see TM_Updated case */
2616 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2617 0 : ExecClearTuple(existing);
2618 0 : return false;
2619 :
2620 0 : default:
2621 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
2622 : }
2623 :
2624 : /* Success, the tuple is locked. */
2625 :
2626 : /*
2627 : * Verify that the tuple is visible to our MVCC snapshot if the current
2628 : * isolation level mandates that.
2629 : *
2630 : * It's not sufficient to rely on the check within ExecUpdate() as e.g.
2631 : * CONFLICT ... WHERE clause may prevent us from reaching that.
2632 : *
2633 : * This means we only ever continue when a new command in the current
2634 : * transaction could see the row, even though in READ COMMITTED mode the
2635 : * tuple will not be visible according to the current statement's
2636 : * snapshot. This is in line with the way UPDATE deals with newer tuple
2637 : * versions.
2638 : */
2639 5176 : ExecCheckTupleVisible(context->estate, relation, existing);
2640 :
2641 : /*
2642 : * Make tuple and any needed join variables available to ExecQual and
2643 : * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
2644 : * the target's existing tuple is installed in the scantuple. EXCLUDED
2645 : * has been made to reference INNER_VAR in setrefs.c, but there is no
2646 : * other redirection.
2647 : */
2648 5176 : econtext->ecxt_scantuple = existing;
2649 5176 : econtext->ecxt_innertuple = excludedSlot;
2650 5176 : econtext->ecxt_outertuple = NULL;
2651 :
2652 5176 : if (!ExecQual(onConflictSetWhere, econtext))
2653 : {
2654 32 : ExecClearTuple(existing); /* see return below */
2655 32 : InstrCountFiltered1(&mtstate->ps, 1);
2656 32 : return true; /* done with the tuple */
2657 : }
2658 :
2659 5144 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2660 : {
2661 : /*
2662 : * Check target's existing tuple against UPDATE-applicable USING
2663 : * security barrier quals (if any), enforced here as RLS checks/WCOs.
2664 : *
2665 : * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
2666 : * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
2667 : * but that's almost the extent of its special handling for ON
2668 : * CONFLICT DO UPDATE.
2669 : *
2670 : * The rewriter will also have associated UPDATE applicable straight
2671 : * RLS checks/WCOs for the benefit of the ExecUpdate() call that
2672 : * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
2673 : * kinds, so there is no danger of spurious over-enforcement in the
2674 : * INSERT or UPDATE path.
2675 : */
2676 60 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
2677 : existing,
2678 : mtstate->ps.state);
2679 : }
2680 :
2681 : /* Project the new tuple version */
2682 5120 : ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
2683 :
2684 : /*
2685 : * Note that it is possible that the target tuple has been modified in
2686 : * this session, after the above table_tuple_lock. We choose to not error
2687 : * out in that case, in line with ExecUpdate's treatment of similar cases.
2688 : * This can happen if an UPDATE is triggered from within ExecQual(),
2689 : * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
2690 : * wCTE in the ON CONFLICT's SET.
2691 : */
2692 :
2693 : /* Execute UPDATE with projection */
2694 10210 : *returning = ExecUpdate(context, resultRelInfo,
2695 : conflictTid, NULL,
2696 5120 : resultRelInfo->ri_onConflict->oc_ProjSlot,
2697 : canSetTag);
2698 :
2699 : /*
2700 : * Clear out existing tuple, as there might not be another conflict among
2701 : * the next input rows. Don't want to hold resources till the end of the
2702 : * query.
2703 : */
2704 5090 : ExecClearTuple(existing);
2705 5090 : return true;
2706 : }
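
/*
 * Illustrative sketch, not PostgreSQL code: ExecOnConflictUpdate() above
 * boils down to "lock the conflicting row, re-check the DO UPDATE ... WHERE
 * clause against it, then apply the SET projection".  The mock below keeps
 * only that shape; the lock and qual functions are hypothetical, and a
 * failed lock models the "retry the INSERT from scratch" return.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct { int id; int val; bool locked; } Row;

static bool lock_row(Row *r)
{
	r->locked = true;
	return true;				/* a false return would force an INSERT retry */
}

/* Hypothetical DO UPDATE ... WHERE qual: only touch rows with val < 100. */
static bool where_qual(const Row *r)
{
	return r->val < 100;
}

/* Returns true when the input row is fully handled (updated or skipped). */
static bool on_conflict_update(Row *conflicting, int new_val)
{
	if (!lock_row(conflicting))
		return false;			/* caller retries the INSERT from scratch */

	if (!where_qual(conflicting))
		return true;			/* row stays locked but unmodified */

	conflicting->val = new_val;	/* the projected SET assignment */
	return true;
}

int main(void)
{
	Row r = { .id = 1, .val = 42, .locked = false };

	on_conflict_update(&r, 99);
	printf("val = %d\n", r.val);	/* 99 */
	return 0;
}
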
2707 :
2708 : /*
2709 : * Perform MERGE.
2710 : */
2711 : static TupleTableSlot *
2712 5042 : ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2713 : ItemPointer tupleid, bool canSetTag)
2714 : {
2715 : bool matched;
2716 :
2717 : /*-----
2718 : * If we are dealing with a WHEN MATCHED case (tupleid is valid), we
2719 : * execute the first action for which the additional WHEN MATCHED AND
2720 : * quals pass. If an action without quals is found, that action is
2721 : * executed.
2722 : *
2723 : * Similarly, if we are dealing with WHEN NOT MATCHED case, we look at
2724 : * the given WHEN NOT MATCHED actions in sequence until one passes.
2725 : *
2726 : * Things get interesting in case of concurrent update/delete of the
2727 : * target tuple. Such concurrent update/delete is detected while we are
2728 : * executing a WHEN MATCHED action.
2729 : *
2730 : * A concurrent update can:
2731 : *
2732 : * 1. modify the target tuple so that it no longer satisfies the
2733 : * additional quals attached to the current WHEN MATCHED action
2734 : *
2735 : * In this case, we are still dealing with a WHEN MATCHED case.
2736 : * We recheck the list of WHEN MATCHED actions from the start and
2737 : * choose the first one that satisfies the new target tuple.
2738 : *
2739 : * 2. modify the target tuple so that the join quals no longer pass and
2740 : * hence the source tuple no longer has a match.
2741 : *
2742 : * In this case, the source tuple no longer matches the target tuple,
2743 : * so we now instead find a qualifying WHEN NOT MATCHED action to
2744 : * execute.
2745 : *
2746 : * XXX Hmmm, what if the updated tuple would now match one that was
2747 : * considered NOT MATCHED so far?
2748 : *
2749 : * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED.
2750 : *
2751 : * ExecMergeMatched takes care of following the update chain and
2752 : * re-finding the qualifying WHEN MATCHED action, as long as the updated
2753 : * target tuple still satisfies the join quals, i.e., it remains a WHEN
2754 : * MATCHED case. If the tuple gets deleted or the join quals fail, it
2755 : * returns and we try ExecMergeNotMatched. Given that ExecMergeMatched
2756 : * always makes progress by following the update chain and we never switch
2757 : * from ExecMergeNotMatched to ExecMergeMatched, there is no risk of a
2758 : * livelock.
2759 : */
2760 5042 : matched = tupleid != NULL;
2761 5042 : if (matched)
2762 2986 : matched = ExecMergeMatched(context, resultRelInfo, tupleid, canSetTag);
2763 :
2764 : /*
2765 : * Either we were dealing with a NOT MATCHED tuple or ExecMergeMatched()
2766 : * returned "false", indicating the previously MATCHED tuple no longer
2767 : * matches.
2768 : */
2769 4992 : if (!matched)
2770 2072 : ExecMergeNotMatched(context, resultRelInfo, canSetTag);
2771 :
2772 : /* No RETURNING support yet */
2773 4962 : return NULL;
2774 : }
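
/*
 * Illustrative sketch, not PostgreSQL code: the MERGE dispatch above.  A
 * valid row id means "try the WHEN MATCHED actions first"; if that path
 * reports the row is no longer matched (concurrently updated so the join
 * quals fail, or deleted), fall through to the WHEN NOT MATCHED actions.
 * All names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical: returns false when the target row vanished or stopped
 * matching while we were working on it. */
static bool merge_matched(int rowid)
{
	return rowid % 2 == 0;
}

static void merge_not_matched(void)
{
	printf("ran a NOT MATCHED action\n");
}

static void exec_merge(const int *rowid)
{
	bool matched = (rowid != NULL);

	if (matched)
		matched = merge_matched(*rowid);

	if (!matched)
		merge_not_matched();	/* includes the concurrently-deleted case */
}

int main(void)
{
	int id = 3;

	exec_merge(&id);			/* matched at first, then reported unmatched */
	exec_merge(NULL);			/* source row had no join partner */
	return 0;
}
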
2775 :
2776 : /*
2777 : * Check and execute the first qualifying MATCHED action. The current target
2778 : * tuple is identified by tupleid.
2779 : *
2780 : * We start from the first WHEN MATCHED action and check if the WHEN quals
2781 : * pass, if any. If the WHEN quals for the first action do not pass, we
2782 : * check the second, then the third and so on. If we reach to the end, no
2783 : * action is taken and we return true, indicating that no further action is
2784 : * required for this tuple.
2785 : *
2786 : * If we do find a qualifying action, then we attempt to execute the action.
2787 : *
2788 : * If the tuple is concurrently updated, EvalPlanQual is run with the updated
2789 : * tuple to recheck the join quals. Note that the additional quals associated
2790 : * with individual actions are evaluated by this routine via ExecQual, while
2791 : * EvalPlanQual checks for the join quals. If EvalPlanQual tells us that the
2792 : * updated tuple still passes the join quals, then we restart from the first
2793 : * action to look for a qualifying action. Otherwise, we return false --
2794 : * meaning that a NOT MATCHED action must now be executed for the current
2795 : * source tuple.
2796 : */
2797 : static bool
2798 2986 : ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2799 : ItemPointer tupleid, bool canSetTag)
2800 : {
2801 2986 : ModifyTableState *mtstate = context->mtstate;
2802 : TupleTableSlot *newslot;
2803 2986 : EState *estate = context->estate;
2804 2986 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2805 : bool isNull;
2806 2986 : EPQState *epqstate = &mtstate->mt_epqstate;
2807 : ListCell *l;
2808 :
2809 : /*
2810 : * If there are no WHEN MATCHED actions, we are done.
2811 : */
2812 2986 : if (resultRelInfo->ri_matchedMergeAction == NIL)
2813 528 : return true;
2814 :
2815 : /*
2816 : * Make tuple and any needed join variables available to ExecQual and
2817 : * ExecProject. The target's existing tuple is installed in the scantuple.
2818 : * Again, this target relation's slot is required only in the case of a
2819 : * MATCHED tuple and UPDATE/DELETE actions.
2820 : */
2821 2458 : econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
2822 2458 : econtext->ecxt_innertuple = context->planSlot;
2823 2458 : econtext->ecxt_outertuple = NULL;
2824 :
2825 2494 : lmerge_matched:
2826 :
2827 : /*
2828 : * This routine is only invoked for matched rows, and we must have found
2829 : * the tupleid of the target row in that case; fetch that tuple.
2830 : *
2831 : * We use SnapshotAny for this because we might get called again after
2832 : * EvalPlanQual returns us a new tuple, which may not be visible to our
2833 : * MVCC snapshot.
2834 : */
2835 :
2836 2494 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
2837 : tupleid,
2838 : SnapshotAny,
2839 : resultRelInfo->ri_oldTupleSlot))
2840 0 : elog(ERROR, "failed to fetch the target tuple");
2841 :
2842 3876 : foreach(l, resultRelInfo->ri_matchedMergeAction)
2843 : {
2844 3132 : MergeActionState *relaction = (MergeActionState *) lfirst(l);
2845 3132 : CmdType commandType = relaction->mas_action->commandType;
2846 : TM_Result result;
2847 3132 : UpdateContext updateCxt = {0};
2848 :
2849 : /*
2850 : * Test condition, if any.
2851 : *
2852 : * In the absence of any condition, we perform the action
2853 : * unconditionally (no need to check separately since ExecQual() will
2854 : * return true if there are no conditions to evaluate).
2855 : */
2856 3132 : if (!ExecQual(relaction->mas_whenqual, econtext))
2857 1382 : continue;
2858 :
2859 : /*
2860 : * Check if the existing target tuple meets the USING checks of
2861 : * UPDATE/DELETE RLS policies. If those checks fail, we throw an
2862 : * error.
2863 : *
2864 : * The WITH CHECK quals are applied in ExecUpdate() and hence we need
2865 : * not do anything special to handle them.
2866 : *
2867 : * NOTE: We must do this after WHEN quals are evaluated, so that we
2868 : * check policies only when they matter.
2869 : */
2870 1750 : if (resultRelInfo->ri_WithCheckOptions)
2871 : {
2872 60 : ExecWithCheckOptions(commandType == CMD_UPDATE ?
2873 : WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
2874 : resultRelInfo,
2875 : resultRelInfo->ri_oldTupleSlot,
2876 60 : context->mtstate->ps.state);
2877 : }
2878 :
2879 : /* Perform stated action */
2880 1726 : switch (commandType)
2881 : {
2882 1452 : case CMD_UPDATE:
2883 :
2884 : /*
2885 : * Project the output tuple, and use that to update the table.
2886 : * We don't need to filter out junk attributes, because the
2887 : * UPDATE action's targetlist doesn't have any.
2888 : */
2889 1452 : newslot = ExecProject(relaction->mas_proj);
2890 :
2891 1452 : context->relaction = relaction;
2892 1452 : if (!ExecUpdatePrologue(context, resultRelInfo,
2893 : tupleid, NULL, newslot, &result))
2894 : {
2895 18 : if (result == TM_Ok)
2896 28 : return true; /* "do nothing" */
2897 12 : break; /* concurrent update/delete */
2898 : }
2899 1434 : result = ExecUpdateAct(context, resultRelInfo, tupleid, NULL,
2900 : newslot, false, &updateCxt);
2901 1420 : if (result == TM_Ok && updateCxt.updated)
2902 : {
2903 1386 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
2904 : tupleid, NULL, newslot);
2905 1386 : mtstate->mt_merge_updated += 1;
2906 : }
2907 1420 : break;
2908 :
2909 262 : case CMD_DELETE:
2910 262 : context->relaction = relaction;
2911 262 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
2912 : NULL, NULL, &result))
2913 : {
2914 12 : if (result == TM_Ok)
2915 6 : return true; /* "do nothing" */
2916 6 : break; /* concurrent update/delete */
2917 : }
2918 250 : result = ExecDeleteAct(context, resultRelInfo, tupleid, false);
2919 250 : if (result == TM_Ok)
2920 : {
2921 238 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
2922 : false);
2923 238 : mtstate->mt_merge_deleted += 1;
2924 : }
2925 250 : break;
2926 :
2927 12 : case CMD_NOTHING:
2928 : /* Doing nothing is always OK */
2929 12 : result = TM_Ok;
2930 12 : break;
2931 :
2932 0 : default:
2933 0 : elog(ERROR, "unknown action in MERGE WHEN MATCHED clause");
2934 : }
2935 :
2936 1700 : switch (result)
2937 : {
2938 1636 : case TM_Ok:
2939 : /* all good; perform final actions */
2940 1636 : if (canSetTag && commandType != CMD_NOTHING)
2941 1624 : (estate->es_processed)++;
2942 :
2943 1636 : break;
2944 :
2945 12 : case TM_SelfModified:
2946 :
2947 : /*
2948 : * The SQL standard disallows this for MERGE.
2949 : */
2950 12 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
2951 12 : ereport(ERROR,
2952 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2953 : /* translator: %s is a SQL command name */
2954 : errmsg("%s command cannot affect row a second time",
2955 : "MERGE"),
2956 : errhint("Ensure that not more than one source row matches any one target row.")));
2957 : /* This shouldn't happen */
2958 0 : elog(ERROR, "attempted to update or delete invisible tuple");
2959 : break;
2960 :
2961 8 : case TM_Deleted:
2962 8 : if (IsolationUsesXactSnapshot())
2963 0 : ereport(ERROR,
2964 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2965 : errmsg("could not serialize access due to concurrent delete")));
2966 :
2967 : /*
2968 : * If the tuple was already deleted, return to let caller
2969 : * handle it under NOT MATCHED clauses.
2970 : */
2971 8 : return false;
2972 :
2973 44 : case TM_Updated:
2974 : {
2975 : Relation resultRelationDesc;
2976 : TupleTableSlot *epqslot,
2977 : *inputslot;
2978 : LockTupleMode lockmode;
2979 :
2980 : /*
2981 : * The target tuple was concurrently updated by some other
2982 : * transaction. Run EvalPlanQual() with the new version of
2983 : * the tuple. If it does not return a tuple, then we
2984 : * switch to the NOT MATCHED list of actions. If it does
2985 : * return a tuple and the join qual is still satisfied,
2986 : * then we just need to recheck the MATCHED actions,
2987 : * starting from the top, and execute the first qualifying
2988 : * action.
2989 : */
2990 44 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
2991 44 : lockmode = ExecUpdateLockMode(estate, resultRelInfo);
2992 :
2993 44 : inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
2994 : resultRelInfo->ri_RangeTableIndex);
2995 :
2996 44 : result = table_tuple_lock(resultRelationDesc, tupleid,
2997 : estate->es_snapshot,
2998 : inputslot, estate->es_output_cid,
2999 : lockmode, LockWaitBlock,
3000 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
3001 : &context->tmfd);
3002 44 : switch (result)
3003 : {
3004 42 : case TM_Ok:
3005 42 : epqslot = EvalPlanQual(epqstate,
3006 : resultRelationDesc,
3007 : resultRelInfo->ri_RangeTableIndex,
3008 : inputslot);
3009 :
3010 : /*
3011 : * If we got no tuple, or the tuple we got has a
3012 : * NULL ctid, go back to caller: this one is not a
3013 : * MATCHED tuple anymore, so they can retry with
3014 : * NOT MATCHED actions.
3015 : */
3016 42 : if (TupIsNull(epqslot))
3017 0 : return false;
3018 :
3019 42 : (void) ExecGetJunkAttribute(epqslot,
3020 42 : resultRelInfo->ri_RowIdAttNo,
3021 : &isNull);
3022 42 : if (isNull)
3023 6 : return false;
3024 :
3025 : /*
3026 : * When a tuple was updated and migrated to
3027 : * another partition concurrently, the current
3028 : * MERGE implementation can't follow. There's
3029 : * probably a better way to handle this case, but
3030 : * it'd require recognizing the relation to which
3031 : * the tuple moved, and setting our current
3032 : * resultRelInfo to that.
3033 : */
3034 36 : if (ItemPointerIndicatesMovedPartitions(&context->tmfd.ctid))
3035 0 : ereport(ERROR,
3036 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3037 : errmsg("tuple to be deleted was already moved to another partition due to concurrent update")));
3038 :
3039 : /*
3040 : * A non-NULL ctid means that we are still dealing
3041 : * with the MATCHED case. Restart the loop so that we
3042 : * apply all the MATCHED rules again, to ensure
3043 : * that the first qualifying WHEN MATCHED action
3044 : * is executed.
3045 : *
3046 : * Update tupleid to that of the new tuple, for
3047 : * the refetch we do at the top.
3048 : */
3049 36 : ItemPointerCopy(&context->tmfd.ctid, tupleid);
3050 36 : goto lmerge_matched;
3051 :
3052 2 : case TM_Deleted:
3053 :
3054 : /*
3055 : * tuple already deleted; tell caller to run NOT
3056 : * MATCHED actions
3057 : */
3058 2 : return false;
3059 :
3060 0 : case TM_SelfModified:
3061 :
3062 : /*
3063 : * This can be reached when following an update
3064 : * chain from a tuple updated by another session,
3065 : * reaching a tuple that was already updated in
3066 : * this transaction. If previously modified by
3067 : * this command, ignore the redundant update,
3068 : * otherwise error out.
3069 : *
3070 : * See also response to TM_SelfModified in
3071 : * ExecUpdate().
3072 : */
3073 0 : if (context->tmfd.cmax != estate->es_output_cid)
3074 0 : ereport(ERROR,
3075 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3076 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3077 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3078 0 : return false;
3079 :
3080 0 : default:
3081 : /* see table_tuple_lock call in ExecDelete() */
3082 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
3083 : result);
3084 : return false;
3085 : }
3086 : }
3087 :
3088 0 : case TM_Invisible:
3089 : case TM_WouldBlock:
3090 : case TM_BeingModified:
3091 : /* these should not occur */
3092 0 : elog(ERROR, "unexpected tuple operation result: %d", result);
3093 : break;
3094 : }
3095 :
3096 : /*
3097 : * We've activated one of the WHEN clauses, so we don't search
3098 : * further. This is required behaviour, not an optimization.
3099 : */
3100 1636 : break;
3101 : }
3102 :
3103 : /*
3104 : * We either executed an action successfully, or found no qualifying action.
3105 : */
3106 2380 : return true;
3107 : }
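/*
 * Editorial sketch, not part of nodeModifyTable.c: the "goto lmerge_matched"
 * retry pattern used above, reduced to standalone C.  When the target row
 * turns out to have a newer version (a concurrent update), we chase the
 * update chain and re-run the WHEN MATCHED checks from the top against the
 * newest version.  All names below (row_version, apply_actions,
 * merge_matched) are hypothetical stand-ins, not executor APIs.
 */
#include <stdbool.h>
#include <stdio.h>

struct row_version
{
    int         id;
    int         value;
    struct row_version *newer;  /* next version in the update chain, if any */
};

static bool
apply_actions(const struct row_version *row)
{
    /* pretend a WHEN MATCHED qual that only even values satisfy */
    return row->value % 2 == 0;
}

static void
merge_matched(const struct row_version *row)
{
retry:
    if (row->newer != NULL)
    {
        /* concurrently updated: move to the newest version, start over */
        row = row->newer;
        goto retry;
    }
    if (apply_actions(row))
        printf("action applied to id=%d (value=%d)\n", row->id, row->value);
    else
        printf("no qualifying WHEN MATCHED action for id=%d\n", row->id);
}

int
main(void)
{
    struct row_version v2 = {1, 4, NULL};
    struct row_version v1 = {1, 3, &v2};    /* v1 was updated to v2 */

    merge_matched(&v1);         /* follows the chain, then applies */
    return 0;
}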
3108 :
3109 : /*
3110 : * Execute the first qualifying NOT MATCHED action.
3111 : */
3112 : static void
3113 2072 : ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3114 : bool canSetTag)
3115 : {
3116 2072 : ModifyTableState *mtstate = context->mtstate;
3117 2072 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3118 2072 : List *actionStates = NIL;
3119 : ListCell *l;
3120 :
3121 : /*
3122 : * For INSERT actions, the root relation's merge action is OK since the
3123 : * INSERT's targetlist and the WHEN conditions can only refer to the
3124 : * source relation and hence it does not matter which result relation we
3125 : * work with.
3126 : *
3127 : * XXX does this mean that we can avoid creating copies of actionStates on
3128 : * partitioned tables, for not-matched actions?
3129 : */
3130 2072 : actionStates = resultRelInfo->ri_notMatchedMergeAction;
3131 :
3132 : /*
3133 : * Make source tuple available to ExecQual and ExecProject. We don't need
3134 : * the target tuple, since the WHEN quals and targetlist can't refer to
3135 : * the target columns.
3136 : */
3137 2072 : econtext->ecxt_scantuple = NULL;
3138 2072 : econtext->ecxt_innertuple = context->planSlot;
3139 2072 : econtext->ecxt_outertuple = NULL;
3140 :
3141 2636 : foreach(l, actionStates)
3142 : {
3143 2072 : MergeActionState *action = (MergeActionState *) lfirst(l);
3144 2072 : CmdType commandType = action->mas_action->commandType;
3145 : TupleTableSlot *newslot;
3146 :
3147 : /*
3148 : * Test condition, if any.
3149 : *
3150 : * In the absence of any condition, we perform the action
3151 : * unconditionally (no need to check separately since ExecQual() will
3152 : * return true if there are no conditions to evaluate).
3153 : */
3154 2072 : if (!ExecQual(action->mas_whenqual, econtext))
3155 564 : continue;
3156 :
3157 : /* Perform stated action */
3158 1508 : switch (commandType)
3159 : {
3160 1508 : case CMD_INSERT:
3161 :
3162 : /*
3163 : * Project the tuple. In case of a partitioned table, the
3164 : * projection was already built to use the root's descriptor,
3165 : * so we don't need to map the tuple here.
3166 : */
3167 1508 : newslot = ExecProject(action->mas_proj);
3168 1508 : context->relaction = action;
3169 :
3170 1508 : (void) ExecInsert(context, mtstate->rootResultRelInfo, newslot,
3171 : canSetTag, NULL, NULL);
3172 1478 : mtstate->mt_merge_inserted += 1;
3173 1478 : break;
3174 0 : case CMD_NOTHING:
3175 : /* Do nothing */
3176 0 : break;
3177 0 : default:
3178 0 : elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
3179 : }
3180 :
3181 : /*
3182 : * We've activated one of the WHEN clauses, so we don't search
3183 : * further. This is required behaviour, not an optimization.
3184 : */
3185 1478 : break;
3186 : }
3187 2042 : }
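/*
 * Editorial sketch, not part of nodeModifyTable.c: the "first qualifying
 * WHEN clause wins" rule that ExecMergeNotMatched implements above, as
 * standalone C.  We evaluate each action's qual in order, run the first one
 * that passes, and stop searching; an action with no qual always qualifies.
 * The merge_action struct and all names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    const char *name;
    bool        (*qual) (int src);  /* NULL means unconditional */
    void        (*run) (int src);
} merge_action;

static bool src_is_big(int src) { return src > 100; }
static void insert_big(int src) { printf("INSERT (big) %d\n", src); }
static void insert_any(int src) { printf("INSERT %d\n", src); }

static void
merge_not_matched(const merge_action *actions, int nactions, int src)
{
    for (int i = 0; i < nactions; i++)
    {
        if (actions[i].qual != NULL && !actions[i].qual(src))
            continue;           /* qual failed, try the next WHEN clause */
        actions[i].run(src);
        break;                  /* required behaviour: stop at first match */
    }
}

int
main(void)
{
    merge_action actions[] = {
        {"big", src_is_big, insert_big},
        {"any", NULL, insert_any},
    };

    merge_not_matched(actions, 2, 250); /* runs the "big" action only */
    merge_not_matched(actions, 2, 7);   /* falls through to "any" */
    return 0;
}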
3188 :
3189 : /*
3190 : * Initialize state for execution of MERGE.
3191 : */
3192 : void
3193 862 : ExecInitMerge(ModifyTableState *mtstate, EState *estate)
3194 : {
3195 862 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
3196 862 : ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
3197 : ResultRelInfo *resultRelInfo;
3198 : ExprContext *econtext;
3199 : ListCell *lc;
3200 : int i;
3201 :
3202 862 : if (node->mergeActionLists == NIL)
3203 0 : return;
3204 :
3205 862 : mtstate->mt_merge_subcommands = 0;
3206 :
3207 862 : if (mtstate->ps.ps_ExprContext == NULL)
3208 862 : ExecAssignExprContext(estate, &mtstate->ps);
3209 862 : econtext = mtstate->ps.ps_ExprContext;
3210 :
3211 : /*
3212 : * Create a MergeActionState for each action on the mergeActionList and
3213 : * add it to either a list of matched actions or not-matched actions.
3214 : *
3215 : * Similar logic appears in ExecInitPartitionInfo(), so if changing
3216 : * anything here, do so there too.
3217 : */
3218 862 : i = 0;
3219 1852 : foreach(lc, node->mergeActionLists)
3220 : {
3221 990 : List *mergeActionList = lfirst(lc);
3222 : TupleDesc relationDesc;
3223 : ListCell *l;
3224 :
3225 990 : resultRelInfo = mtstate->resultRelInfo + i;
3226 990 : i++;
3227 990 : relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3228 :
3229 : /* initialize slots for MERGE fetches from this rel */
3230 990 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3231 990 : ExecInitMergeTupleSlots(mtstate, resultRelInfo);
3232 :
3233 2578 : foreach(l, mergeActionList)
3234 : {
3235 1588 : MergeAction *action = (MergeAction *) lfirst(l);
3236 : MergeActionState *action_state;
3237 : TupleTableSlot *tgtslot;
3238 : TupleDesc tgtdesc;
3239 : List **list;
3240 :
3241 : /*
3242 : * Build action merge state for this rel. (For partitions,
3243 : * equivalent code exists in ExecInitPartitionInfo.)
3244 : */
3245 1588 : action_state = makeNode(MergeActionState);
3246 1588 : action_state->mas_action = action;
3247 1588 : action_state->mas_whenqual = ExecInitQual((List *) action->qual,
3248 : &mtstate->ps);
3249 :
3250 : /*
3251 : * We create two lists - one for WHEN MATCHED actions and one for
3252 : * WHEN NOT MATCHED actions - and stick the MergeActionState into
3253 : * the appropriate list.
3254 : */
3255 1588 : if (action_state->mas_action->matched)
3256 954 : list = &resultRelInfo->ri_matchedMergeAction;
3257 : else
3258 634 : list = &resultRelInfo->ri_notMatchedMergeAction;
3259 1588 : *list = lappend(*list, action_state);
3260 :
3261 1588 : switch (action->commandType)
3262 : {
3263 626 : case CMD_INSERT:
3264 626 : ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
3265 : action->targetList);
3266 :
3267 : /*
3268 : * If the MERGE targets a partitioned table, any INSERT
3269 : * actions must be routed through it, not the child
3270 : * relations. Initialize the routing struct and the root
3271 : * table's "new" tuple slot for that, if not already done.
3272 : * The projection we prepare, for all relations, uses the
3273 : * root relation descriptor, and targets the plan's root
3274 : * slot. (This is consistent with the fact that we
3275 : * checked the plan output to match the root relation,
3276 : * above.)
3277 : */
3278 626 : if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
3279 : RELKIND_PARTITIONED_TABLE)
3280 : {
3281 186 : if (mtstate->mt_partition_tuple_routing == NULL)
3282 : {
3283 : /*
3284 : * Initialize planstate for routing if not already
3285 : * done.
3286 : *
3287 : * Note that the slot is managed as a standalone
3288 : * slot belonging to ModifyTableState, so we pass
3289 : * NULL for the 2nd argument.
3290 : */
3291 90 : mtstate->mt_root_tuple_slot =
3292 90 : table_slot_create(rootRelInfo->ri_RelationDesc,
3293 : NULL);
3294 90 : mtstate->mt_partition_tuple_routing =
3295 90 : ExecSetupPartitionTupleRouting(estate,
3296 : rootRelInfo->ri_RelationDesc);
3297 : }
3298 186 : tgtslot = mtstate->mt_root_tuple_slot;
3299 186 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
3300 : }
3301 : else
3302 : {
3303 : /* not partitioned? use the stock relation and slot */
3304 440 : tgtslot = resultRelInfo->ri_newTupleSlot;
3305 440 : tgtdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3306 : }
3307 :
3308 626 : action_state->mas_proj =
3309 626 : ExecBuildProjectionInfo(action->targetList, econtext,
3310 : tgtslot,
3311 : &mtstate->ps,
3312 : tgtdesc);
3313 :
3314 626 : mtstate->mt_merge_subcommands |= MERGE_INSERT;
3315 626 : break;
3316 728 : case CMD_UPDATE:
3317 728 : action_state->mas_proj =
3318 728 : ExecBuildUpdateProjection(action->targetList,
3319 : true,
3320 : action->updateColnos,
3321 : relationDesc,
3322 : econtext,
3323 : resultRelInfo->ri_newTupleSlot,
3324 : &mtstate->ps);
3325 728 : mtstate->mt_merge_subcommands |= MERGE_UPDATE;
3326 728 : break;
3327 212 : case CMD_DELETE:
3328 212 : mtstate->mt_merge_subcommands |= MERGE_DELETE;
3329 212 : break;
3330 22 : case CMD_NOTHING:
3331 22 : break;
3332 0 : default:
3333 0 : elog(ERROR, "unknown operation");
3334 : break;
3335 : }
3336 : }
3337 : }
3338 : }
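/*
 * Editorial sketch, not part of nodeModifyTable.c: the mt_merge_subcommands
 * bitmask that ExecInitMerge accumulates above and that the statement-level
 * trigger code below consults.  Initialization ORs in one flag per action
 * kind seen; trigger time then takes only the per-command paths whose flag
 * is present.  The flag values are illustrative; everything else mirrors
 * just the pattern.
 */
#include <stdio.h>

#define MERGE_INSERT 0x01
#define MERGE_UPDATE 0x02
#define MERGE_DELETE 0x04

int
main(void)
{
    int         subcommands = 0;

    /* "init": suppose the MERGE has one INSERT and one UPDATE action */
    subcommands |= MERGE_INSERT;
    subcommands |= MERGE_UPDATE;

    /* "trigger time": act only on subcommands actually present */
    if (subcommands & MERGE_INSERT)
        printf("fire INSERT statement triggers\n");
    if (subcommands & MERGE_UPDATE)
        printf("fire UPDATE statement triggers\n");
    if (subcommands & MERGE_DELETE)
        printf("fire DELETE statement triggers\n");  /* skipped here */
    return 0;
}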
3339 :
3340 : /*
3341 : * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
3342 : *
3343 : * We mark 'projectNewInfoValid' even though the projections themselves
3344 : * are not initialized here.
3345 : */
3346 : void
3347 1034 : ExecInitMergeTupleSlots(ModifyTableState *mtstate,
3348 : ResultRelInfo *resultRelInfo)
3349 : {
3350 1034 : EState *estate = mtstate->ps.state;
3351 :
3352 : Assert(!resultRelInfo->ri_projectNewInfoValid);
3353 :
3354 1034 : resultRelInfo->ri_oldTupleSlot =
3355 1034 : table_slot_create(resultRelInfo->ri_RelationDesc,
3356 : &estate->es_tupleTable);
3357 1034 : resultRelInfo->ri_newTupleSlot =
3358 1034 : table_slot_create(resultRelInfo->ri_RelationDesc,
3359 : &estate->es_tupleTable);
3360 1034 : resultRelInfo->ri_projectNewInfoValid = true;
3361 1034 : }
3362 :
3363 : /*
3364 : * Process BEFORE EACH STATEMENT triggers
3365 : */
3366 : static void
3367 133448 : fireBSTriggers(ModifyTableState *node)
3368 : {
3369 133448 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
3370 133448 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3371 :
3372 133448 : switch (node->operation)
3373 : {
3374 105420 : case CMD_INSERT:
3375 105420 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3376 105408 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3377 828 : ExecBSUpdateTriggers(node->ps.state,
3378 : resultRelInfo);
3379 105408 : break;
3380 15442 : case CMD_UPDATE:
3381 15442 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3382 15442 : break;
3383 11778 : case CMD_DELETE:
3384 11778 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3385 11778 : break;
3386 808 : case CMD_MERGE:
3387 808 : if (node->mt_merge_subcommands & MERGE_INSERT)
3388 476 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3389 808 : if (node->mt_merge_subcommands & MERGE_UPDATE)
3390 528 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3391 808 : if (node->mt_merge_subcommands & MERGE_DELETE)
3392 176 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3393 808 : break;
3394 0 : default:
3395 0 : elog(ERROR, "unknown operation");
3396 : break;
3397 : }
3398 133436 : }
3399 :
3400 : /*
3401 : * Process AFTER EACH STATEMENT triggers
3402 : */
3403 : static void
3404 130652 : fireASTriggers(ModifyTableState *node)
3405 : {
3406 130652 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
3407 130652 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3408 :
3409 130652 : switch (node->operation)
3410 : {
3411 103392 : case CMD_INSERT:
3412 103392 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3413 726 : ExecASUpdateTriggers(node->ps.state,
3414 : resultRelInfo,
3415 726 : node->mt_oc_transition_capture);
3416 103392 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
3417 103392 : node->mt_transition_capture);
3418 103392 : break;
3419 14876 : case CMD_UPDATE:
3420 14876 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3421 14876 : node->mt_transition_capture);
3422 14876 : break;
3423 11656 : case CMD_DELETE:
3424 11656 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
3425 11656 : node->mt_transition_capture);
3426 11656 : break;
3427 728 : case CMD_MERGE:
3428 728 : if (node->mt_merge_subcommands & MERGE_DELETE)
3429 146 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
3430 146 : node->mt_transition_capture);
3431 728 : if (node->mt_merge_subcommands & MERGE_UPDATE)
3432 478 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3433 478 : node->mt_transition_capture);
3434 728 : if (node->mt_merge_subcommands & MERGE_INSERT)
3435 444 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
3436 444 : node->mt_transition_capture);
3437 728 : break;
3438 0 : default:
3439 0 : elog(ERROR, "unknown operation");
3440 : break;
3441 : }
3442 130652 : }
3443 :
3444 : /*
3445 : * Set up the state needed for collecting transition tuples for AFTER
3446 : * triggers.
3447 : */
3448 : static void
3449 133740 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
3450 : {
3451 133740 : ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
3452 133740 : ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
3453 :
3454 : /* Check for transition tables on the directly targeted relation. */
3455 133740 : mtstate->mt_transition_capture =
3456 133740 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3457 133740 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3458 : mtstate->operation);
3459 133740 : if (plan->operation == CMD_INSERT &&
3460 105422 : plan->onConflictAction == ONCONFLICT_UPDATE)
3461 828 : mtstate->mt_oc_transition_capture =
3462 828 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3463 828 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3464 : CMD_UPDATE);
3465 133740 : }
3466 :
3467 : /*
3468 : * ExecPrepareTupleRouting --- prepare for routing one tuple
3469 : *
3470 : * Determine the partition in which the tuple in slot is to be inserted,
3471 : * and return its ResultRelInfo in *partRelInfo. The return value is
3472 : * a slot holding the tuple of the partition rowtype.
3473 : *
3474 : * This also sets the transition table information in mtstate based on the
3475 : * selected partition.
3476 : */
3477 : static TupleTableSlot *
3478 721036 : ExecPrepareTupleRouting(ModifyTableState *mtstate,
3479 : EState *estate,
3480 : PartitionTupleRouting *proute,
3481 : ResultRelInfo *targetRelInfo,
3482 : TupleTableSlot *slot,
3483 : ResultRelInfo **partRelInfo)
3484 : {
3485 : ResultRelInfo *partrel;
3486 : TupleConversionMap *map;
3487 :
3488 : /*
3489 : * Lookup the target partition's ResultRelInfo. If ExecFindPartition does
3490 : * not find a valid partition for the tuple in 'slot' then an error is
3491 : * raised. An error may also be raised if the found partition is not a
3492 : * valid target for INSERTs. This is required since a partitioned table
3493 : * UPDATE to another partition becomes a DELETE+INSERT.
3494 : */
3495 721036 : partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
3496 :
3497 : /*
3498 : * If we're capturing transition tuples, we might need to convert from the
3499 : * partition rowtype to the root partitioned table's rowtype. But if there
3500 : * are no BEFORE triggers on the partition that could change the tuple, we
3501 : * can just remember the original unconverted tuple to avoid a needless
3502 : * round trip conversion.
3503 : */
3504 720838 : if (mtstate->mt_transition_capture != NULL)
3505 : {
3506 : bool has_before_insert_row_trig;
3507 :
3508 168 : has_before_insert_row_trig = (partrel->ri_TrigDesc &&
3509 42 : partrel->ri_TrigDesc->trig_insert_before_row);
3510 :
3511 126 : mtstate->mt_transition_capture->tcs_original_insert_tuple =
3512 126 : !has_before_insert_row_trig ? slot : NULL;
3513 : }
3514 :
3515 : /*
3516 : * Convert the tuple, if necessary.
3517 : */
3518 720838 : map = ExecGetRootToChildMap(partrel, estate);
3519 720838 : if (map != NULL)
3520 : {
3521 68310 : TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
3522 :
3523 68310 : slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
3524 : }
3525 :
3526 720838 : *partRelInfo = partrel;
3527 720838 : return slot;
3528 : }
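/*
 * Editorial sketch, not part of nodeModifyTable.c: the attribute-map
 * conversion step of ExecPrepareTupleRouting above, reduced to plain
 * arrays.  attrmap[i] names the 1-based root column that feeds partition
 * column i, so a partition whose physical column order differs from the
 * root's still receives the right values.  Names and layout here are
 * hypothetical, not the executor's TupleConversionMap machinery.
 */
#include <stdio.h>

static void
convert_row(const int *src, const int *attrmap, int natts, int *dst)
{
    for (int i = 0; i < natts; i++)
        dst[i] = src[attrmap[i] - 1];   /* map is 1-based, like AttrNumber */
}

int
main(void)
{
    int         root_row[3] = {10, 20, 30}; /* root columns (a, b, c) */
    int         map[3] = {3, 1, 2};         /* partition stores (c, a, b) */
    int         part_row[3];

    convert_row(root_row, map, 3, part_row);
    printf("%d %d %d\n", part_row[0], part_row[1], part_row[2]); /* 30 10 20 */
    return 0;
}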
3529 :
3530 : /* ----------------------------------------------------------------
3531 : * ExecModifyTable
3532 : *
3533 : * Perform table modifications as required, and return RETURNING results
3534 : * if needed.
3535 : * ----------------------------------------------------------------
3536 : */
3537 : static TupleTableSlot *
3538 141212 : ExecModifyTable(PlanState *pstate)
3539 : {
3540 141212 : ModifyTableState *node = castNode(ModifyTableState, pstate);
3541 : ModifyTableContext context;
3542 141212 : EState *estate = node->ps.state;
3543 141212 : CmdType operation = node->operation;
3544 : ResultRelInfo *resultRelInfo;
3545 : PlanState *subplanstate;
3546 : TupleTableSlot *slot;
3547 : TupleTableSlot *oldSlot;
3548 : ItemPointerData tuple_ctid;
3549 : HeapTupleData oldtupdata;
3550 : HeapTuple oldtuple;
3551 : ItemPointer tupleid;
3552 :
3553 141212 : CHECK_FOR_INTERRUPTS();
3554 :
3555 : /*
3556 : * This should NOT get called during EvalPlanQual; we should have passed a
3557 : * subplan tree to EvalPlanQual, instead. Use a runtime test not just
3558 : * Assert because this condition is easy to miss in testing. (Note:
3559 : * although ModifyTable should not get executed within an EvalPlanQual
3560 : * operation, we do have to allow it to be initialized and shut down in
3561 : * case it is within a CTE subplan. Hence this test must be here, not in
3562 : * ExecInitModifyTable.)
3563 : */
3564 141212 : if (estate->es_epq_active != NULL)
3565 0 : elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
3566 :
3567 : /*
3568 : * If we've already completed processing, don't try to do more. We need
3569 : * this test because ExecPostprocessPlan might call us an extra time, and
3570 : * our subplan's nodes aren't necessarily robust against being called
3571 : * extra times.
3572 : */
3573 141212 : if (node->mt_done)
3574 770 : return NULL;
3575 :
3576 : /*
3577 : * On first call, fire BEFORE STATEMENT triggers before proceeding.
3578 : */
3579 140442 : if (node->fireBSTriggers)
3580 : {
3581 133448 : fireBSTriggers(node);
3582 133436 : node->fireBSTriggers = false;
3583 : }
3584 :
3585 : /* Preload local variables */
3586 140430 : resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
3587 140430 : subplanstate = outerPlanState(node);
3588 :
3589 : /* Set global context */
3590 140430 : context.mtstate = node;
3591 140430 : context.epqstate = &node->mt_epqstate;
3592 140430 : context.estate = estate;
3593 :
3594 : /*
3595 : * Fetch rows from subplan, and execute the required table modification
3596 : * for each row.
3597 : */
3598 : for (;;)
3599 : {
3600 : /*
3601 : * Reset the per-output-tuple exprcontext. This is needed because
3602 : * triggers expect to use that context as workspace. It's a bit ugly
3603 : * to do this below the top level of the plan, however. We might need
3604 : * to rethink this later.
3605 : */
3606 14232304 : ResetPerTupleExprContext(estate);
3607 :
3608 : /*
3609 : * Reset per-tuple memory context used for processing on conflict and
3610 : * returning clauses, to free any expression evaluation storage
3611 : * allocated in the previous cycle.
3612 : */
3613 14232304 : if (pstate->ps_ExprContext)
3614 390236 : ResetExprContext(pstate->ps_ExprContext);
3615 :
3616 14232304 : context.planSlot = ExecProcNode(subplanstate);
3617 :
3618 : /* No more tuples to process? */
3619 14231970 : if (TupIsNull(context.planSlot))
3620 : break;
3621 :
3622 : /*
3623 : * When there are multiple result relations, each tuple contains a
3624 : * junk column that gives the OID of the rel from which it came.
3625 : * Extract it and select the correct result relation.
3626 : */
3627 14101318 : if (AttributeNumberIsValid(node->mt_resultOidAttno))
3628 : {
3629 : Datum datum;
3630 : bool isNull;
3631 : Oid resultoid;
3632 :
3633 4336 : datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
3634 : &isNull);
3635 4336 : if (isNull)
3636 : {
3637 : /*
3638 : * For commands other than MERGE, any tuples having InvalidOid
3639 : * for tableoid are errors. For MERGE, such tuples may need to
3640 : * be handled by WHEN NOT MATCHED clauses, if any, so do that.
3641 : *
3642 : * Note that we use the node's toplevel resultRelInfo, not any
3643 : * specific partition's.
3644 : */
3645 370 : if (operation == CMD_MERGE)
3646 : {
3647 370 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
3648 :
3649 370 : ExecMerge(&context, node->resultRelInfo, NULL, node->canSetTag);
3650 370 : continue; /* no RETURNING support yet */
3651 : }
3652 :
3653 0 : elog(ERROR, "tableoid is NULL");
3654 : }
3655 3966 : resultoid = DatumGetObjectId(datum);
3656 :
3657 : /* If it's not the same as last time, we need to locate the rel */
3658 3966 : if (resultoid != node->mt_lastResultOid)
3659 2680 : resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
3660 : false, true);
3661 : }
3662 :
3663 : /*
3664 : * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
3665 : * here is compute the RETURNING expressions.
3666 : */
3667 14100948 : if (resultRelInfo->ri_usesFdwDirectModify)
3668 : {
3669 : Assert(resultRelInfo->ri_projectReturning);
3670 :
3671 : /*
3672 : * A scan slot containing the data that was actually inserted,
3673 : * updated or deleted has already been made available to
3674 : * ExecProcessReturning by IterateDirectModify, so no need to
3675 : * provide it here.
3676 : */
3677 694 : slot = ExecProcessReturning(resultRelInfo, NULL, context.planSlot);
3678 :
3679 694 : return slot;
3680 : }
3681 :
3682 14100254 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
3683 14100254 : slot = context.planSlot;
3684 :
3685 14100254 : tupleid = NULL;
3686 14100254 : oldtuple = NULL;
3687 :
3688 : /*
3689 : * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
3690 : * to be updated/deleted/merged. For a heap relation, that's a TID;
3691 : * otherwise we may have a wholerow junk attr that carries the old
3692 : * tuple in toto. Keep this in step with the part of
3693 : * ExecInitModifyTable that sets up ri_RowIdAttNo.
3694 : */
3695 14100254 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
3696 : operation == CMD_MERGE)
3697 : {
3698 : char relkind;
3699 : Datum datum;
3700 : bool isNull;
3701 :
3702 2057642 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
3703 2057642 : if (relkind == RELKIND_RELATION ||
3704 354 : relkind == RELKIND_MATVIEW ||
3705 : relkind == RELKIND_PARTITIONED_TABLE)
3706 : {
3707 : /* ri_RowIdAttNo refers to a ctid attribute */
3708 : Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo));
3709 2057294 : datum = ExecGetJunkAttribute(slot,
3710 2057294 : resultRelInfo->ri_RowIdAttNo,
3711 : &isNull);
3712 :
3713 : /*
3714 : * For commands other than MERGE, any tuples having a null row
3715 : * identifier are errors. For MERGE, such tuples may need to
3716 : * be handled by WHEN NOT MATCHED clauses, if any, so do that.
3717 : *
3718 : * Note that we use the node's toplevel resultRelInfo, not any
3719 : * specific partition's.
3720 : */
3721 2057294 : if (isNull)
3722 : {
3723 1686 : if (operation == CMD_MERGE)
3724 : {
3725 1686 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
3726 :
3727 1686 : ExecMerge(&context, node->resultRelInfo, NULL, node->canSetTag);
3728 1656 : continue; /* no RETURNING support yet */
3729 : }
3730 :
3731 0 : elog(ERROR, "ctid is NULL");
3732 : }
3733 :
3734 2055608 : tupleid = (ItemPointer) DatumGetPointer(datum);
3735 2055608 : tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
3736 2055608 : tupleid = &tuple_ctid;
3737 : }
3738 :
3739 : /*
3740 : * Use the wholerow attribute, when available, to reconstruct the
3741 : * old relation tuple. The old tuple serves one or both of two
3742 : * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
3743 : * provides values for any unchanged columns for the NEW tuple of
3744 : * an UPDATE, because the subplan does not produce all the columns
3745 : * of the target table.
3746 : *
3747 : * Note that the wholerow attribute does not carry system columns,
3748 : * so foreign table triggers miss seeing those, except that we
3749 : * know enough here to set t_tableOid. Quite separately from
3750 : * this, the FDW may fetch its own junk attrs to identify the row.
3751 : *
3752 : * Other relevant relkinds, currently limited to views, always
3753 : * have a wholerow attribute.
3754 : */
3755 348 : else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
3756 : {
3757 330 : datum = ExecGetJunkAttribute(slot,
3758 330 : resultRelInfo->ri_RowIdAttNo,
3759 : &isNull);
3760 : /* shouldn't ever get a null result... */
3761 330 : if (isNull)
3762 0 : elog(ERROR, "wholerow is NULL");
3763 :
3764 330 : oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
3765 330 : oldtupdata.t_len =
3766 330 : HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
3767 330 : ItemPointerSetInvalid(&(oldtupdata.t_self));
3768 : /* Historically, view triggers see invalid t_tableOid. */
3769 330 : oldtupdata.t_tableOid =
3770 330 : (relkind == RELKIND_VIEW) ? InvalidOid :
3771 162 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
3772 :
3773 330 : oldtuple = &oldtupdata;
3774 : }
3775 : else
3776 : {
3777 : /* Only foreign tables are allowed to omit a row-ID attr */
3778 : Assert(relkind == RELKIND_FOREIGN_TABLE);
3779 : }
3780 : }
3781 :
3782 14098568 : switch (operation)
3783 : {
3784 12042612 : case CMD_INSERT:
3785 : /* Initialize projection info if first time for this table */
3786 12042612 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3787 101772 : ExecInitInsertProjection(node, resultRelInfo);
3788 12042612 : slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
3789 12042612 : slot = ExecInsert(&context, resultRelInfo, slot,
3790 12042612 : node->canSetTag, NULL, NULL);
3791 12040728 : break;
3792 :
3793 366684 : case CMD_UPDATE:
3794 : /* Initialize projection info if first time for this table */
3795 366684 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3796 15206 : ExecInitUpdateProjection(node, resultRelInfo);
3797 :
3798 : /*
3799 : * Make the new tuple by combining plan's output tuple with
3800 : * the old tuple being updated.
3801 : */
3802 366684 : oldSlot = resultRelInfo->ri_oldTupleSlot;
3803 366684 : if (oldtuple != NULL)
3804 : {
3805 : /* Use the wholerow junk attr as the old tuple. */
3806 258 : ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
3807 : }
3808 : else
3809 : {
3810 : /* Fetch the most recent version of old tuple. */
3811 366426 : Relation relation = resultRelInfo->ri_RelationDesc;
3812 :
3813 366426 : if (!table_tuple_fetch_row_version(relation, tupleid,
3814 : SnapshotAny,
3815 : oldSlot))
3816 0 : elog(ERROR, "failed to fetch tuple being updated");
3817 : }
3818 366684 : slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
3819 : oldSlot);
3820 366684 : context.relaction = NULL;
3821 :
3822 : /* Now apply the update. */
3823 366684 : slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
3824 366684 : slot, node->canSetTag);
3825 366290 : break;
3826 :
3827 1686286 : case CMD_DELETE:
3828 1686286 : slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
3829 1686286 : true, false, node->canSetTag, NULL, NULL);
3830 1686206 : break;
3831 :
3832 2986 : case CMD_MERGE:
3833 2986 : slot = ExecMerge(&context, resultRelInfo, tupleid, node->canSetTag);
3834 2936 : break;
3835 :
3836 0 : default:
3837 0 : elog(ERROR, "unknown operation");
3838 : break;
3839 : }
3840 :
3841 : /*
3842 : * If we got a RETURNING result, return it to caller. We'll continue
3843 : * the work on next call.
3844 : */
3845 14096160 : if (slot)
3846 6312 : return slot;
3847 : }
3848 :
3849 : /*
3850 : * Insert remaining tuples for batch insert.
3851 : */
3852 130652 : if (estate->es_insert_pending_result_relations != NIL)
3853 26 : ExecPendingInserts(estate);
3854 :
3855 : /*
3856 : * We're done, but fire AFTER STATEMENT triggers before exiting.
3857 : */
3858 130652 : fireASTriggers(node);
3859 :
3860 130652 : node->mt_done = true;
3861 :
3862 130652 : return NULL;
3863 : }
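/*
 * Editorial sketch, not part of nodeModifyTable.c: the two calling modes of
 * ExecModifyTable above.  With RETURNING, each call performs one
 * modification and yields its row, resuming from the saved cursor on the
 * next call; without RETURNING, a single call drains the whole subplan and
 * returns NULL.  The state struct and all names are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct mt_state
{
    int         next;           /* position in the subplan's output */
    int         nrows;
    bool        returning;      /* does the query have RETURNING? */
    bool        done;
};

static const int *
exec_modify(struct mt_state *st, const int *rows)
{
    if (st->done)
        return NULL;            /* guard against being called extra times */
    while (st->next < st->nrows)
    {
        const int  *row = &rows[st->next++];

        /* ... perform the insert/update/delete for *row here ... */
        if (st->returning)
            return row;         /* yield one result, resume on next call */
    }
    st->done = true;            /* no RETURNING: all work done in one call */
    return NULL;
}

int
main(void)
{
    int         rows[] = {1, 2, 3};
    struct mt_state st = {0, 3, true, false};
    const int  *r;

    while ((r = exec_modify(&st, rows)) != NULL)
        printf("RETURNING %d\n", *r);
    return 0;
}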
3864 :
3865 : /*
3866 : * ExecLookupResultRelByOid
3867 : * If the table with given OID is among the result relations to be
3868 : * updated by the given ModifyTable node, return its ResultRelInfo.
3869 : *
3870 : * If not found, return NULL if missing_ok, else raise error.
3871 : *
3872 : * If update_cache is true, then upon successful lookup, update the node's
3873 : * one-element cache. ONLY ExecModifyTable may pass true for this.
3874 : */
3875 : ResultRelInfo *
3876 10672 : ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid,
3877 : bool missing_ok, bool update_cache)
3878 : {
3879 10672 : if (node->mt_resultOidHash)
3880 : {
3881 : /* Use the pre-built hash table to locate the rel */
3882 : MTTargetRelLookup *mtlookup;
3883 :
3884 : mtlookup = (MTTargetRelLookup *)
3885 0 : hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
3886 0 : if (mtlookup)
3887 : {
3888 0 : if (update_cache)
3889 : {
3890 0 : node->mt_lastResultOid = resultoid;
3891 0 : node->mt_lastResultIndex = mtlookup->relationIndex;
3892 : }
3893 0 : return node->resultRelInfo + mtlookup->relationIndex;
3894 : }
3895 : }
3896 : else
3897 : {
3898 : /* With few target rels, just search the ResultRelInfo array */
3899 20414 : for (int ndx = 0; ndx < node->mt_nrels; ndx++)
3900 : {
3901 12808 : ResultRelInfo *rInfo = node->resultRelInfo + ndx;
3902 :
3903 12808 : if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
3904 : {
3905 3066 : if (update_cache)
3906 : {
3907 2680 : node->mt_lastResultOid = resultoid;
3908 2680 : node->mt_lastResultIndex = ndx;
3909 : }
3910 3066 : return rInfo;
3911 : }
3912 : }
3913 : }
3914 :
3915 7606 : if (!missing_ok)
3916 0 : elog(ERROR, "incorrect result relation OID %u", resultoid);
3917 7606 : return NULL;
3918 : }
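/*
 * Editorial sketch, not part of nodeModifyTable.c: the lookup strategy of
 * ExecLookupResultRelByOid above.  A one-element cache catches runs of
 * consecutive tuples from the same relation, and with few target rels a
 * linear scan beats building a hash table (the real code switches to a
 * hash table past a threshold).  Types and names here are hypothetical.
 */
#include <stdio.h>

struct lookup_state
{
    unsigned    last_oid;       /* one-element cache */
    int         last_index;
};

static int
lookup_rel(struct lookup_state *st, const unsigned *oids, int n, unsigned oid)
{
    if (oid == st->last_oid)
        return st->last_index;  /* cache hit: no scan needed */
    for (int i = 0; i < n; i++)
    {
        if (oids[i] == oid)
        {
            st->last_oid = oid; /* remember for the next tuple */
            st->last_index = i;
            return i;
        }
    }
    return -1;                  /* caller decides: error or missing_ok */
}

int
main(void)
{
    unsigned    oids[] = {1001, 1002, 1003};
    struct lookup_state st = {0, -1};

    printf("%d\n", lookup_rel(&st, oids, 3, 1002)); /* scans, then caches */
    printf("%d\n", lookup_rel(&st, oids, 3, 1002)); /* pure cache hit */
    return 0;
}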
3919 :
3920 : /* ----------------------------------------------------------------
3921 : * ExecInitModifyTable
3922 : * ----------------------------------------------------------------
3923 : */
3924 : ModifyTableState *
3925 134480 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
3926 : {
3927 : ModifyTableState *mtstate;
3928 134480 : Plan *subplan = outerPlan(node);
3929 134480 : CmdType operation = node->operation;
3930 134480 : int nrels = list_length(node->resultRelations);
3931 : ResultRelInfo *resultRelInfo;
3932 : List *arowmarks;
3933 : ListCell *l;
3934 : int i;
3935 : Relation rel;
3936 :
3937 : /* check for unsupported flags */
3938 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
3939 :
3940 : /*
3941 : * create state structure
3942 : */
3943 134480 : mtstate = makeNode(ModifyTableState);
3944 134480 : mtstate->ps.plan = (Plan *) node;
3945 134480 : mtstate->ps.state = estate;
3946 134480 : mtstate->ps.ExecProcNode = ExecModifyTable;
3947 :
3948 134480 : mtstate->operation = operation;
3949 134480 : mtstate->canSetTag = node->canSetTag;
3950 134480 : mtstate->mt_done = false;
3951 :
3952 134480 : mtstate->mt_nrels = nrels;
3953 134480 : mtstate->resultRelInfo = (ResultRelInfo *)
3954 134480 : palloc(nrels * sizeof(ResultRelInfo));
3955 :
3956 134480 : mtstate->mt_merge_inserted = 0;
3957 134480 : mtstate->mt_merge_updated = 0;
3958 134480 : mtstate->mt_merge_deleted = 0;
3959 :
3960 : /*----------
3961 : * Resolve the target relation. This is the same as:
3962 : *
3963 : * - the relation for which we will fire FOR STATEMENT triggers,
3964 : * - the relation into whose tuple format all captured transition tuples
3965 : * must be converted, and
3966 : * - the root partitioned table used for tuple routing.
3967 : *
3968 : * If it's a partitioned table, the root partition doesn't appear
3969 : * elsewhere in the plan and its RT index is given explicitly in
3970 : * node->rootRelation. Otherwise (i.e. table inheritance) the target
3971 : * relation is the first relation in the node->resultRelations list.
3972 : *----------
3973 : */
3974 134480 : if (node->rootRelation > 0)
3975 : {
3976 7220 : mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
3977 7220 : ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
3978 : node->rootRelation);
3979 : }
3980 : else
3981 : {
3982 127260 : mtstate->rootResultRelInfo = mtstate->resultRelInfo;
3983 127260 : ExecInitResultRelation(estate, mtstate->resultRelInfo,
3984 127260 : linitial_int(node->resultRelations));
3985 : }
3986 :
3987 : /* set up epqstate with dummy subplan data for the moment */
3988 134480 : EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
3989 : node->epqParam, node->resultRelations);
3990 134480 : mtstate->fireBSTriggers = true;
3991 :
3992 : /*
3993 : * Build state for collecting transition tuples. This requires having a
3994 : * valid trigger query context, so skip it in explain-only mode.
3995 : */
3996 134480 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
3997 133740 : ExecSetupTransitionCaptureState(mtstate, estate);
3998 :
3999 : /*
4000 : * Open all the result relations and initialize the ResultRelInfo structs.
4001 : * (But root relation was initialized above, if it's part of the array.)
4002 : * We must do this before initializing the subplan, because direct-modify
4003 : * FDWs expect their ResultRelInfos to be available.
4004 : */
4005 134480 : resultRelInfo = mtstate->resultRelInfo;
4006 134480 : i = 0;
4007 270896 : foreach(l, node->resultRelations)
4008 : {
4009 136684 : Index resultRelation = lfirst_int(l);
4010 :
4011 136684 : if (resultRelInfo != mtstate->rootResultRelInfo)
4012 : {
4013 9424 : ExecInitResultRelation(estate, resultRelInfo, resultRelation);
4014 :
4015 : /*
4016 : * For child result relations, store the root result relation
4017 : * pointer. We do so for the convenience of places that want to
4018 : * look at the query's original target relation but don't have the
4019 : * mtstate handy.
4020 : */
4021 9424 : resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
4022 : }
4023 :
4024 : /* Initialize the usesFdwDirectModify flag */
4025 136684 : resultRelInfo->ri_usesFdwDirectModify =
4026 136684 : bms_is_member(i, node->fdwDirectModifyPlans);
4027 :
4028 : /*
4029 : * Verify result relation is a valid target for the current operation
4030 : */
4031 136684 : CheckValidResultRel(resultRelInfo, operation);
4032 :
4033 136416 : resultRelInfo++;
4034 136416 : i++;
4035 : }
4036 :
4037 : /*
4038 : * Now we may initialize the subplan.
4039 : */
4040 134212 : outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
4041 :
4042 : /*
4043 : * Do additional per-result-relation initialization.
4044 : */
4045 270594 : for (i = 0; i < nrels; i++)
4046 : {
4047 136382 : resultRelInfo = &mtstate->resultRelInfo[i];
4048 :
4049 : /* Let FDWs init themselves for foreign-table result rels */
4050 136382 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4051 136174 : resultRelInfo->ri_FdwRoutine != NULL &&
4052 308 : resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
4053 : {
4054 308 : List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
4055 :
4056 308 : resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
4057 : resultRelInfo,
4058 : fdw_private,
4059 : i,
4060 : eflags);
4061 : }
4062 :
4063 : /*
4064 : * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
4065 : * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
4066 : * tables, the FDW might have created additional junk attr(s), but
4067 : * those are no concern of ours.
4068 : */
4069 136382 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4070 : operation == CMD_MERGE)
4071 : {
4072 : char relkind;
4073 :
4074 30742 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4075 30742 : if (relkind == RELKIND_RELATION ||
4076 568 : relkind == RELKIND_MATVIEW ||
4077 : relkind == RELKIND_PARTITIONED_TABLE)
4078 : {
4079 30210 : resultRelInfo->ri_RowIdAttNo =
4080 30210 : ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
4081 30210 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4082 0 : elog(ERROR, "could not find junk ctid column");
4083 : }
4084 532 : else if (relkind == RELKIND_FOREIGN_TABLE)
4085 : {
4086 : /*
4087 : * We don't support MERGE with foreign tables for now. (It's
4088 : * problematic because the implementation uses CTID.)
4089 : */
4090 : Assert(operation != CMD_MERGE);
4091 :
4092 : /*
4093 : * When there is a row-level trigger, there should be a
4094 : * wholerow attribute. We also require it to be present in
4095 : * UPDATE and MERGE, so we can get the values of unchanged
4096 : * columns.
4097 : */
4098 340 : resultRelInfo->ri_RowIdAttNo =
4099 340 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4100 : "wholerow");
4101 340 : if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
4102 190 : !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4103 0 : elog(ERROR, "could not find junk wholerow column");
4104 : }
4105 : else
4106 : {
4107 : /* No support for MERGE */
4108 : Assert(operation != CMD_MERGE);
4109 : /* Other valid target relkinds must provide wholerow */
4110 192 : resultRelInfo->ri_RowIdAttNo =
4111 192 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4112 : "wholerow");
4113 192 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4114 0 : elog(ERROR, "could not find junk wholerow column");
4115 : }
4116 : }
4117 : }
4118 :
4119 : /*
4120 : * If this is an inherited update/delete/merge, there will be a junk
4121 : * attribute named "tableoid" present in the subplan's targetlist. It
4122 : * will be used to identify the result relation for a given tuple to be
4123 : * updated/deleted/merged.
4124 : */
4125 134212 : mtstate->mt_resultOidAttno =
4126 134212 : ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
4127 : Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || nrels == 1);
4128 134212 : mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
4129 134212 : mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
4130 :
4131 : /* Get the root target relation */
4132 134212 : rel = mtstate->rootResultRelInfo->ri_RelationDesc;
4133 :
4134 : /*
4135 : * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
4136 : * or MERGE might need this too, but only if it actually moves tuples
4137 : * between partitions; in that case setup is done by
4138 : * ExecCrossPartitionUpdate.
4139 : */
4140 134212 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
4141 : operation == CMD_INSERT)
4142 5438 : mtstate->mt_partition_tuple_routing =
4143 5438 : ExecSetupPartitionTupleRouting(estate, rel);
4144 :
4145 : /*
4146 : * Initialize any WITH CHECK OPTION constraints if needed.
4147 : */
4148 134212 : resultRelInfo = mtstate->resultRelInfo;
4149 135450 : foreach(l, node->withCheckOptionLists)
4150 : {
4151 1238 : List *wcoList = (List *) lfirst(l);
4152 1238 : List *wcoExprs = NIL;
4153 : ListCell *ll;
4154 :
4155 3284 : foreach(ll, wcoList)
4156 : {
4157 2046 : WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
4158 2046 : ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
4159 : &mtstate->ps);
4160 :
4161 2046 : wcoExprs = lappend(wcoExprs, wcoExpr);
4162 : }
4163 :
4164 1238 : resultRelInfo->ri_WithCheckOptions = wcoList;
4165 1238 : resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
4166 1238 : resultRelInfo++;
4167 : }
4168 :
4169 : /*
4170 : * Initialize RETURNING projections if needed.
4171 : */
4172 134212 : if (node->returningLists)
4173 : {
4174 : TupleTableSlot *slot;
4175 : ExprContext *econtext;
4176 :
4177 : /*
4178 : * Initialize result tuple slot and assign its rowtype using the first
4179 : * RETURNING list. We assume the rest will look the same.
4180 : */
4181 3980 : mtstate->ps.plan->targetlist = (List *) linitial(node->returningLists);
4182 :
4183 : /* Set up a slot for the output of the RETURNING projection(s) */
4184 3980 : ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
4185 3980 : slot = mtstate->ps.ps_ResultTupleSlot;
4186 :
4187 : /* Need an econtext too */
4188 3980 : if (mtstate->ps.ps_ExprContext == NULL)
4189 3980 : ExecAssignExprContext(estate, &mtstate->ps);
4190 3980 : econtext = mtstate->ps.ps_ExprContext;
4191 :
4192 : /*
4193 : * Build a projection for each result rel.
4194 : */
4195 3980 : resultRelInfo = mtstate->resultRelInfo;
4196 8258 : foreach(l, node->returningLists)
4197 : {
4198 4278 : List *rlist = (List *) lfirst(l);
4199 :
4200 4278 : resultRelInfo->ri_returningList = rlist;
4201 4278 : resultRelInfo->ri_projectReturning =
4202 4278 : ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
4203 4278 : resultRelInfo->ri_RelationDesc->rd_att);
4204 4278 : resultRelInfo++;
4205 : }
4206 : }
4207 : else
4208 : {
4209 : /*
4210 : * We still must construct a dummy result tuple type, because InitPlan
4211 : * expects one (maybe should change that?).
4212 : */
4213 130232 : mtstate->ps.plan->targetlist = NIL;
4214 130232 : ExecInitResultTypeTL(&mtstate->ps);
4215 :
4216 130232 : mtstate->ps.ps_ExprContext = NULL;
4217 : }
4218 :
4219 : /* Set the list of arbiter indexes if needed for ON CONFLICT */
4220 134212 : resultRelInfo = mtstate->resultRelInfo;
4221 134212 : if (node->onConflictAction != ONCONFLICT_NONE)
4222 : {
4223 : /* insert may only have one relation, inheritance is not expanded */
4224 : Assert(nrels == 1);
4225 1188 : resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
4226 : }
4227 :
4228 : /*
4229 : * If needed, initialize the target list, projection and qual for ON CONFLICT
4230 : * DO UPDATE.
4231 : */
4232 134212 : if (node->onConflictAction == ONCONFLICT_UPDATE)
4233 : {
4234 900 : OnConflictSetState *onconfl = makeNode(OnConflictSetState);
4235 : ExprContext *econtext;
4236 : TupleDesc relationDesc;
4237 :
4238 : /* already exists if created by RETURNING processing above */
4239 900 : if (mtstate->ps.ps_ExprContext == NULL)
4240 632 : ExecAssignExprContext(estate, &mtstate->ps);
4241 :
4242 900 : econtext = mtstate->ps.ps_ExprContext;
4243 900 : relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
4244 :
4245 : /* create state for DO UPDATE SET operation */
4246 900 : resultRelInfo->ri_onConflict = onconfl;
4247 :
4248 : /* initialize slot for the existing tuple */
4249 900 : onconfl->oc_Existing =
4250 900 : table_slot_create(resultRelInfo->ri_RelationDesc,
4251 900 : &mtstate->ps.state->es_tupleTable);
4252 :
4253 : /*
4254 : * Create the tuple slot for the UPDATE SET projection. We want a slot
4255 : * of the table's type here, because the slot will be used to insert
4256 : * into the table, and for RETURNING processing - which may access
4257 : * system attributes.
4258 : */
4259 900 : onconfl->oc_ProjSlot =
4260 900 : table_slot_create(resultRelInfo->ri_RelationDesc,
4261 900 : &mtstate->ps.state->es_tupleTable);
4262 :
4263 : /* build UPDATE SET projection state */
4264 900 : onconfl->oc_ProjInfo =
4265 900 : ExecBuildUpdateProjection(node->onConflictSet,
4266 : true,
4267 : node->onConflictCols,
4268 : relationDesc,
4269 : econtext,
4270 : onconfl->oc_ProjSlot,
4271 : &mtstate->ps);
4272 :
4273 : /* initialize state to evaluate the WHERE clause, if any */
4274 900 : if (node->onConflictWhere)
4275 : {
4276 : ExprState *qualexpr;
4277 :
4278 176 : qualexpr = ExecInitQual((List *) node->onConflictWhere,
4279 : &mtstate->ps);
4280 176 : onconfl->oc_WhereClause = qualexpr;
4281 : }
4282 : }
4283 :
4284 : /*
4285 : * If we have any secondary relations in an UPDATE or DELETE, they need to
4286 : * be treated like non-locked relations in SELECT FOR UPDATE, ie, the
4287 : * EvalPlanQual mechanism needs to be told about them. Locate the
4288 : * relevant ExecRowMarks.
4289 : */
4290 134212 : arowmarks = NIL;
4291 135680 : foreach(l, node->rowMarks)
4292 : {
4293 1468 : PlanRowMark *rc = lfirst_node(PlanRowMark, l);
4294 : ExecRowMark *erm;
4295 : ExecAuxRowMark *aerm;
4296 :
4297 : /* ignore "parent" rowmarks; they are irrelevant at runtime */
4298 1468 : if (rc->isParent)
4299 94 : continue;
4300 :
4301 : /* Find ExecRowMark and build ExecAuxRowMark */
4302 1374 : erm = ExecFindRowMark(estate, rc->rti, false);
4303 1374 : aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
4304 1374 : arowmarks = lappend(arowmarks, aerm);
4305 : }
4306 :
4307 : /* For a MERGE command, initialize its state */
4308 134212 : if (mtstate->operation == CMD_MERGE)
4309 862 : ExecInitMerge(mtstate, estate);
4310 :
4311 134212 : EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
4312 :
4313 : /*
4314 : * If there are a lot of result relations, use a hash table to speed the
4315 : * lookups. If there are not a lot, a simple linear search is faster.
4316 : *
4317 : * It's not clear where the threshold is, but try 64 for starters. In a
4318 : * debugging build, use a small threshold so that we get some test
4319 : * coverage of both code paths.
4320 : */
4321 : #ifdef USE_ASSERT_CHECKING
4322 : #define MT_NRELS_HASH 4
4323 : #else
4324 : #define MT_NRELS_HASH 64
4325 : #endif
4326 134212 : if (nrels >= MT_NRELS_HASH)
4327 : {
4328 : HASHCTL hash_ctl;
4329 :
4330 0 : hash_ctl.keysize = sizeof(Oid);
4331 0 : hash_ctl.entrysize = sizeof(MTTargetRelLookup);
4332 0 : hash_ctl.hcxt = CurrentMemoryContext;
4333 0 : mtstate->mt_resultOidHash =
4334 0 : hash_create("ModifyTable target hash",
4335 : nrels, &hash_ctl,
4336 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
4337 0 : for (i = 0; i < nrels; i++)
4338 : {
4339 : Oid hashkey;
4340 : MTTargetRelLookup *mtlookup;
4341 : bool found;
4342 :
4343 0 : resultRelInfo = &mtstate->resultRelInfo[i];
4344 0 : hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
4345 : mtlookup = (MTTargetRelLookup *)
4346 0 : hash_search(mtstate->mt_resultOidHash, &hashkey,
4347 : HASH_ENTER, &found);
4348 : Assert(!found);
4349 0 : mtlookup->relationIndex = i;
4350 : }
4351 : }
4352 : else
4353 134212 : mtstate->mt_resultOidHash = NULL;
4354 :
4355 : /*
4356 : * Determine if the FDW supports batch insert and determine the batch size
4357 : * (an FDW may support batching, but it may be disabled for the
4358 : * server/table).
4359 : *
4360 : * We only do this for INSERT, so that for UPDATE/DELETE the batch size
4361 : * remains set to 0.
4362 : */
4363 134212 : if (operation == CMD_INSERT)
4364 : {
4365 : /* insert may only have one relation, inheritance is not expanded */
4366 : Assert(nrels == 1);
4367 105640 : resultRelInfo = mtstate->resultRelInfo;
4368 105640 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4369 105640 : resultRelInfo->ri_FdwRoutine != NULL &&
4370 176 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
4371 176 : resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
4372 : {
4373 176 : resultRelInfo->ri_BatchSize =
4374 176 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
4375 176 : Assert(resultRelInfo->ri_BatchSize >= 1);
4376 : }
4377 : else
4378 105464 : resultRelInfo->ri_BatchSize = 1;
4379 : }
4380 :
4381 : /*
4382 : * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
4383 : * to estate->es_auxmodifytables so that it will be run to completion by
4384 : * ExecPostprocessPlan. (It'd actually work fine to add the primary
4385 : * ModifyTable node too, but there's no need.) Note the use of lcons not
4386 : * lappend: we need later-initialized ModifyTable nodes to be shut down
4387 : * before earlier ones. This ensures that we don't throw away RETURNING
4388 : * rows that need to be seen by a later CTE subplan.
4389 : */
4390 134212 : if (!mtstate->canSetTag)
4391 894 : estate->es_auxmodifytables = lcons(mtstate,
4392 : estate->es_auxmodifytables);
4393 :
4394 134212 : return mtstate;
4395 : }
4396 :
4397 : /* ----------------------------------------------------------------
4398 : * ExecEndModifyTable
4399 : *
4400 : * Shuts down the plan.
4401 : *
4402 : * Returns nothing of interest.
4403 : * ----------------------------------------------------------------
4404 : */
4405 : void
4406 130534 : ExecEndModifyTable(ModifyTableState *node)
4407 : {
4408 : int i;
4409 :
4410 : /*
4411 : * Allow any FDWs to shut down
4412 : */
4413 262984 : for (i = 0; i < node->mt_nrels; i++)
4414 : {
4415 : int j;
4416 132450 : ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
4417 :
4418 132450 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4419 132258 : resultRelInfo->ri_FdwRoutine != NULL &&
4420 288 : resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
4421 288 : resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
4422 : resultRelInfo);
4423 :
4424 : /*
4425 : * Clean up the initialized batch slots. This only matters for FDWs
4426 : * with batching, but the other cases will have ri_NumSlotsInitialized
4427 : * == 0.
4428 : */
4429 263576 : for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
4430 : {
4431 131126 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
4432 131126 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
4433 : }
4434 : }
4435 :
4436 : /*
4437 : * Close all the partitioned tables, leaf partitions, and their indices
4438 : * and release the slot used for tuple routing, if set.
4439 : */
4440 130534 : if (node->mt_partition_tuple_routing)
4441 : {
4442 5424 : ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
4443 :
4444 5424 : if (node->mt_root_tuple_slot)
4445 502 : ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
4446 : }
4447 :
4448 : /*
4449 : * Free the exprcontext
4450 : */
4451 130534 : ExecFreeExprContext(&node->ps);
4452 :
4453 : /*
4454 : * clean out the tuple table
4455 : */
4456 130534 : if (node->ps.ps_ResultTupleSlot)
4457 3848 : ExecClearTuple(node->ps.ps_ResultTupleSlot);
4458 :
4459 : /*
4460 : * Terminate EPQ execution if active
4461 : */
4462 130534 : EvalPlanQualEnd(&node->mt_epqstate);
4463 :
4464 : /*
4465 : * shut down subplan
4466 : */
4467 130534 : ExecEndNode(outerPlanState(node));
4468 130534 : }
4469 :
4470 : void
4471 0 : ExecReScanModifyTable(ModifyTableState *node)
4472 : {
4473 : /*
4474 : * Currently, we don't need to support rescan on ModifyTable nodes. The
4475 : * semantics of that would be a bit debatable anyway.
4476 : */
4477 0 : elog(ERROR, "ExecReScanModifyTable is not implemented");
4478 : }
|