Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeModifyTable.c
4 : * routines to handle ModifyTable nodes.
5 : *
6 : * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/executor/nodeModifyTable.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : /* INTERFACE ROUTINES
16 : * ExecInitModifyTable - initialize the ModifyTable node
17 : * ExecModifyTable - retrieve the next tuple from the node
18 : * ExecEndModifyTable - shut down the ModifyTable node
19 : * ExecReScanModifyTable - rescan the ModifyTable node
20 : *
21 : * NOTES
22 : * The ModifyTable node receives input from its outerPlan, which is
23 : * the data to insert for INSERT cases, the changed columns' new
24 : * values plus row-locating info for UPDATE and MERGE cases, or just the
25 : * row-locating info for DELETE cases.
26 : *
27 : * The relation to modify can be an ordinary table, a foreign table, or a
28 : * view. If it's a view, either it has sufficient INSTEAD OF triggers or
29 : * this node executes only MERGE ... DO NOTHING. If the original MERGE
30 : * targeted a view not in one of those two categories, earlier processing
31 : * already pointed the ModifyTable result relation to an underlying
32 : * relation of that other view. This node does process
33 : * ri_WithCheckOptions, which may have expressions from those other,
34 : * automatically updatable views.
35 : *
36 : * MERGE runs a join between the source relation and the target table.
37 : * If any WHEN NOT MATCHED [BY TARGET] clauses are present, then the join
38 : * is an outer join that might output tuples without a matching target
39 : * tuple. In this case, any unmatched target tuples will have NULL
40 : * row-locating info, and only INSERT can be run. But for matched target
41 : * tuples, the row-locating info is used to determine the tuple to UPDATE
42 : * or DELETE. When all clauses are WHEN MATCHED or WHEN NOT MATCHED BY
43 : * SOURCE, all tuples produced by the join will include a matching target
44 : * tuple, so all tuples contain row-locating info.
45 : *
46 : * If the query specifies RETURNING, then the ModifyTable returns a
47 : * RETURNING tuple after completing each row insert, update, or delete.
48 : * It must be called again to continue the operation. Without RETURNING,
49 : * we just loop within the node until all the work is done, then
50 : * return NULL. This avoids useless call/return overhead.
51 : */
52 :
53 : #include "postgres.h"
54 :
55 : #include "access/htup_details.h"
56 : #include "access/tableam.h"
57 : #include "access/tupconvert.h"
58 : #include "access/xact.h"
59 : #include "commands/trigger.h"
60 : #include "executor/execPartition.h"
61 : #include "executor/executor.h"
62 : #include "executor/instrument.h"
63 : #include "executor/nodeModifyTable.h"
64 : #include "foreign/fdwapi.h"
65 : #include "miscadmin.h"
66 : #include "nodes/nodeFuncs.h"
67 : #include "optimizer/optimizer.h"
68 : #include "rewrite/rewriteHandler.h"
69 : #include "rewrite/rewriteManip.h"
70 : #include "storage/lmgr.h"
71 : #include "utils/builtins.h"
72 : #include "utils/datum.h"
73 : #include "utils/injection_point.h"
74 : #include "utils/rangetypes.h"
75 : #include "utils/rel.h"
76 : #include "utils/snapmgr.h"
77 :
78 :
79 : typedef struct MTTargetRelLookup
80 : {
81 : Oid relationOid; /* hash key, must be first */
82 : int relationIndex; /* rel's index in resultRelInfo[] array */
83 : } MTTargetRelLookup;
84 :
85 : /*
86 : * Context struct for a ModifyTable operation, containing basic execution
87 : * state and some output variables populated by ExecUpdateAct() and
88 : * ExecDeleteAct() to report the result of their actions to callers.
89 : */
90 : typedef struct ModifyTableContext
91 : {
92 : /* Operation state */
93 : ModifyTableState *mtstate;
94 : EPQState *epqstate;
95 : EState *estate;
96 :
97 : /*
98 : * Slot containing tuple obtained from ModifyTable's subplan. Used to
99 : * access "junk" columns that are not going to be stored.
100 : */
101 : TupleTableSlot *planSlot;
102 :
103 : /*
104 : * Information about the changes that were made concurrently to a tuple
105 : * being updated or deleted
106 : */
107 : TM_FailureData tmfd;
108 :
109 : /*
110 : * The tuple deleted when doing a cross-partition UPDATE with a RETURNING
111 : * clause that refers to OLD columns (converted to the root's tuple
112 : * descriptor).
113 : */
114 : TupleTableSlot *cpDeletedSlot;
115 :
116 : /*
117 : * The tuple projected by the INSERT's RETURNING clause, when doing a
118 : * cross-partition UPDATE
119 : */
120 : TupleTableSlot *cpUpdateReturningSlot;
121 : } ModifyTableContext;
122 :
123 : /*
124 : * Context struct containing output data specific to UPDATE operations.
125 : */
126 : typedef struct UpdateContext
127 : {
128 : bool crossPartUpdate; /* was it a cross-partition update? */
129 : TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
130 :
131 : /*
132 : * Lock mode to acquire on the latest tuple version before performing
133 : * EvalPlanQual on it
134 : */
135 : LockTupleMode lockmode;
136 : } UpdateContext;
137 :
138 :
139 : static void ExecBatchInsert(ModifyTableState *mtstate,
140 : ResultRelInfo *resultRelInfo,
141 : TupleTableSlot **slots,
142 : TupleTableSlot **planSlots,
143 : int numSlots,
144 : EState *estate,
145 : bool canSetTag);
146 : static void ExecPendingInserts(EState *estate);
147 : static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
148 : ResultRelInfo *sourcePartInfo,
149 : ResultRelInfo *destPartInfo,
150 : ItemPointer tupleid,
151 : TupleTableSlot *oldslot,
152 : TupleTableSlot *newslot);
153 : static bool ExecOnConflictLockRow(ModifyTableContext *context,
154 : TupleTableSlot *existing,
155 : ItemPointer conflictTid,
156 : Relation relation,
157 : LockTupleMode lockmode,
158 : bool isUpdate);
159 : static bool ExecOnConflictUpdate(ModifyTableContext *context,
160 : ResultRelInfo *resultRelInfo,
161 : ItemPointer conflictTid,
162 : TupleTableSlot *excludedSlot,
163 : bool canSetTag,
164 : TupleTableSlot **returning);
165 : static bool ExecOnConflictSelect(ModifyTableContext *context,
166 : ResultRelInfo *resultRelInfo,
167 : ItemPointer conflictTid,
168 : TupleTableSlot *excludedSlot,
169 : bool canSetTag,
170 : TupleTableSlot **returning);
171 : static void ExecForPortionOfLeftovers(ModifyTableContext *context,
172 : EState *estate,
173 : ResultRelInfo *resultRelInfo,
174 : ItemPointer tupleid);
175 : static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
176 : EState *estate,
177 : PartitionTupleRouting *proute,
178 : ResultRelInfo *targetRelInfo,
179 : TupleTableSlot *slot,
180 : ResultRelInfo **partRelInfo);
181 :
182 : static TupleTableSlot *ExecMerge(ModifyTableContext *context,
183 : ResultRelInfo *resultRelInfo,
184 : ItemPointer tupleid,
185 : HeapTuple oldtuple,
186 : bool canSetTag);
187 : static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
188 : static TupleTableSlot *ExecMergeMatched(ModifyTableContext *context,
189 : ResultRelInfo *resultRelInfo,
190 : ItemPointer tupleid,
191 : HeapTuple oldtuple,
192 : bool canSetTag,
193 : bool *matched);
194 : static TupleTableSlot *ExecMergeNotMatched(ModifyTableContext *context,
195 : ResultRelInfo *resultRelInfo,
196 : bool canSetTag);
197 : static void ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate);
198 : static void fireBSTriggers(ModifyTableState *node);
199 : static void fireASTriggers(ModifyTableState *node);
200 :
201 :
202 : /*
203 : * Verify that the tuples to be produced by INSERT match the
204 : * target relation's rowtype
205 : *
206 : * We do this to guard against stale plans. If plan invalidation is
207 : * functioning properly then we should never get a failure here, but better
208 : * safe than sorry. Note that this is called after we have obtained lock
209 : * on the target rel, so the rowtype can't change underneath us.
210 : *
211 : * The plan output is represented by its targetlist, because that makes
212 : * handling the dropped-column case easier.
213 : *
214 : * We used to use this for UPDATE as well, but now the equivalent checks
215 : * are done in ExecBuildUpdateProjection.
216 : */
217 : static void
218 56340 : ExecCheckPlanOutput(Relation resultRel, List *targetList)
219 : {
220 56340 : TupleDesc resultDesc = RelationGetDescr(resultRel);
221 56340 : int attno = 0;
222 : ListCell *lc;
223 :
224 177064 : foreach(lc, targetList)
225 : {
226 120724 : TargetEntry *tle = (TargetEntry *) lfirst(lc);
227 : Form_pg_attribute attr;
228 :
229 : Assert(!tle->resjunk); /* caller removed junk items already */
230 :
231 120724 : if (attno >= resultDesc->natts)
232 0 : ereport(ERROR,
233 : (errcode(ERRCODE_DATATYPE_MISMATCH),
234 : errmsg("table row type and query-specified row type do not match"),
235 : errdetail("Query has too many columns.")));
236 120724 : attr = TupleDescAttr(resultDesc, attno);
237 120724 : attno++;
238 :
239 : /*
240 : * Special cases here should match planner's expand_insert_targetlist.
241 : */
242 120724 : if (attr->attisdropped)
243 : {
244 : /*
245 : * For a dropped column, we can't check atttypid (it's likely 0).
246 : * In any case the planner has most likely inserted an INT4 null.
247 : * What we insist on is just *some* NULL constant.
248 : */
249 443 : if (!IsA(tle->expr, Const) ||
250 443 : !((Const *) tle->expr)->constisnull)
251 0 : ereport(ERROR,
252 : (errcode(ERRCODE_DATATYPE_MISMATCH),
253 : errmsg("table row type and query-specified row type do not match"),
254 : errdetail("Query provides a value for a dropped column at ordinal position %d.",
255 : attno)));
256 : }
257 120281 : else if (attr->attgenerated)
258 : {
259 : /*
260 : * For a generated column, the planner will have inserted a null
261 : * of the column's base type (to avoid possibly failing on domain
262 : * not-null constraints). It doesn't seem worth insisting on that
263 : * exact type though, since a null value is type-independent. As
264 : * above, just insist on *some* NULL constant.
265 : */
266 824 : if (!IsA(tle->expr, Const) ||
267 824 : !((Const *) tle->expr)->constisnull)
268 0 : ereport(ERROR,
269 : (errcode(ERRCODE_DATATYPE_MISMATCH),
270 : errmsg("table row type and query-specified row type do not match"),
271 : errdetail("Query provides a value for a generated column at ordinal position %d.",
272 : attno)));
273 : }
274 : else
275 : {
276 : /* Normal case: demand type match */
277 119457 : if (exprType((Node *) tle->expr) != attr->atttypid)
278 0 : ereport(ERROR,
279 : (errcode(ERRCODE_DATATYPE_MISMATCH),
280 : errmsg("table row type and query-specified row type do not match"),
281 : errdetail("Table has type %s at ordinal position %d, but query expects %s.",
282 : format_type_be(attr->atttypid),
283 : attno,
284 : format_type_be(exprType((Node *) tle->expr)))));
285 : }
286 : }
287 56340 : if (attno != resultDesc->natts)
288 0 : ereport(ERROR,
289 : (errcode(ERRCODE_DATATYPE_MISMATCH),
290 : errmsg("table row type and query-specified row type do not match"),
291 : errdetail("Query has too few columns.")));
292 56340 : }
293 :
294 : /*
295 : * ExecProcessReturning --- evaluate a RETURNING list
296 : *
297 : * context: context for the ModifyTable operation
298 : * resultRelInfo: current result rel
299 : * isDelete: true if the operation/merge action is a DELETE
300 : * oldSlot: slot holding old tuple deleted or updated
301 : * newSlot: slot holding new tuple inserted or updated
302 : * planSlot: slot holding tuple returned by top subplan node
303 : *
304 : * Note: If oldSlot and newSlot are NULL, the FDW should have already provided
305 : * econtext's scan tuple and its old & new tuples are not needed (FDW direct-
306 : * modify is disabled if the RETURNING list refers to any OLD/NEW values).
307 : *
308 : * Note: For the SELECT path of INSERT ... ON CONFLICT DO SELECT, oldSlot and
309 : * newSlot are both the existing tuple, since it's not changed.
310 : *
311 : * Returns a slot holding the result tuple
312 : */
313 : static TupleTableSlot *
314 5626 : ExecProcessReturning(ModifyTableContext *context,
315 : ResultRelInfo *resultRelInfo,
316 : bool isDelete,
317 : TupleTableSlot *oldSlot,
318 : TupleTableSlot *newSlot,
319 : TupleTableSlot *planSlot)
320 : {
321 5626 : EState *estate = context->estate;
322 5626 : ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
323 5626 : ExprContext *econtext = projectReturning->pi_exprContext;
324 :
325 : /* Make tuple and any needed join variables available to ExecProject */
326 5626 : if (isDelete)
327 : {
328 : /* return old tuple by default */
329 877 : if (oldSlot)
330 758 : econtext->ecxt_scantuple = oldSlot;
331 : }
332 : else
333 : {
334 : /* return new tuple by default */
335 4749 : if (newSlot)
336 4521 : econtext->ecxt_scantuple = newSlot;
337 : }
338 5626 : econtext->ecxt_outertuple = planSlot;
339 :
340 : /* Make old/new tuples available to ExecProject, if required */
341 5626 : if (oldSlot)
342 2541 : econtext->ecxt_oldtuple = oldSlot;
343 3085 : else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
344 140 : econtext->ecxt_oldtuple = ExecGetAllNullSlot(estate, resultRelInfo);
345 : else
346 2945 : econtext->ecxt_oldtuple = NULL; /* No references to OLD columns */
347 :
348 5626 : if (newSlot)
349 4521 : econtext->ecxt_newtuple = newSlot;
350 1105 : else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW)
351 96 : econtext->ecxt_newtuple = ExecGetAllNullSlot(estate, resultRelInfo);
352 : else
353 1009 : econtext->ecxt_newtuple = NULL; /* No references to NEW columns */
354 :
355 : /*
356 : * Tell ExecProject whether or not the OLD/NEW rows actually exist. This
357 : * information is required to evaluate ReturningExpr nodes and also in
358 : * ExecEvalSysVar() and ExecEvalWholeRowVar().
359 : */
360 5626 : if (oldSlot == NULL)
361 3085 : projectReturning->pi_state.flags |= EEO_FLAG_OLD_IS_NULL;
362 : else
363 2541 : projectReturning->pi_state.flags &= ~EEO_FLAG_OLD_IS_NULL;
364 :
365 5626 : if (newSlot == NULL)
366 1105 : projectReturning->pi_state.flags |= EEO_FLAG_NEW_IS_NULL;
367 : else
368 4521 : projectReturning->pi_state.flags &= ~EEO_FLAG_NEW_IS_NULL;
369 :
370 : /* Compute the RETURNING expressions */
371 5626 : return ExecProject(projectReturning);
372 : }
373 :
374 : /*
375 : * ExecCheckTupleVisible -- verify tuple is visible
376 : *
377 : * It would not be consistent with guarantees of the higher isolation levels to
378 : * proceed with avoiding insertion (taking speculative insertion's alternative
379 : * path) on the basis of another tuple that is not visible to MVCC snapshot.
380 : * Check for the need to raise a serialization failure, and do so as necessary.
381 : */
382 : static void
383 2935 : ExecCheckTupleVisible(EState *estate,
384 : Relation rel,
385 : TupleTableSlot *slot)
386 : {
387 2935 : if (!IsolationUsesXactSnapshot())
388 2893 : return;
389 :
390 42 : if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
391 : {
392 : Datum xminDatum;
393 : TransactionId xmin;
394 : bool isnull;
395 :
396 30 : xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
397 : Assert(!isnull);
398 30 : xmin = DatumGetTransactionId(xminDatum);
399 :
400 : /*
401 : * We should not raise a serialization failure if the conflict is
402 : * against a tuple inserted by our own transaction, even if it's not
403 : * visible to our snapshot. (This would happen, for example, if
404 : * conflicting keys are proposed for insertion in a single command.)
405 : */
406 30 : if (!TransactionIdIsCurrentTransactionId(xmin))
407 10 : ereport(ERROR,
408 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
409 : errmsg("could not serialize access due to concurrent update")));
410 : }
411 : }
412 :
413 : /*
414 : * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
415 : */
416 : static void
417 139 : ExecCheckTIDVisible(EState *estate,
418 : ResultRelInfo *relinfo,
419 : ItemPointer tid,
420 : TupleTableSlot *tempSlot)
421 : {
422 139 : Relation rel = relinfo->ri_RelationDesc;
423 :
424 : /* Redundantly check isolation level */
425 139 : if (!IsolationUsesXactSnapshot())
426 105 : return;
427 :
428 34 : if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
429 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
430 34 : ExecCheckTupleVisible(estate, rel, tempSlot);
431 24 : ExecClearTuple(tempSlot);
432 : }
433 :
434 : /*
435 : * Initialize generated columns handling for a tuple
436 : *
437 : * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI or
438 : * ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
439 : * This is used only for stored generated columns.
440 : *
441 : * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
442 : * This is used by both stored and virtual generated columns.
443 : *
444 : * Note: usually, a given query would need only one of ri_GeneratedExprsI and
445 : * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
446 : * cross-partition UPDATEs, since a partition might be the target of both
447 : * UPDATE and INSERT actions.
448 : */
449 : void
450 31840 : ExecInitGenerated(ResultRelInfo *resultRelInfo,
451 : EState *estate,
452 : CmdType cmdtype)
453 : {
454 31840 : Relation rel = resultRelInfo->ri_RelationDesc;
455 31840 : TupleDesc tupdesc = RelationGetDescr(rel);
456 31840 : int natts = tupdesc->natts;
457 : ExprState **ri_GeneratedExprs;
458 : int ri_NumGeneratedNeeded;
459 : Bitmapset *updatedCols;
460 : MemoryContext oldContext;
461 :
462 : /* Nothing to do if no generated columns */
463 31840 : if (!(tupdesc->constr && (tupdesc->constr->has_generated_stored || tupdesc->constr->has_generated_virtual)))
464 31081 : return;
465 :
466 : /*
467 : * In an UPDATE, we can skip computing any generated columns that do not
468 : * depend on any UPDATE target column. But if there is a BEFORE ROW
469 : * UPDATE trigger, we cannot skip because the trigger might change more
470 : * columns.
471 : */
472 759 : if (cmdtype == CMD_UPDATE &&
473 151 : !(rel->trigdesc && rel->trigdesc->trig_update_before_row))
474 123 : updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
475 : else
476 636 : updatedCols = NULL;
477 :
478 : /*
479 : * Make sure these data structures are built in the per-query memory
480 : * context so they'll survive throughout the query.
481 : */
482 759 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
483 :
484 759 : ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
485 759 : ri_NumGeneratedNeeded = 0;
486 :
487 3034 : for (int i = 0; i < natts; i++)
488 : {
489 2279 : char attgenerated = TupleDescAttr(tupdesc, i)->attgenerated;
490 :
491 2279 : if (attgenerated)
492 : {
493 : Expr *expr;
494 :
495 : /* Fetch the GENERATED AS expression tree */
496 806 : expr = (Expr *) build_column_default(rel, i + 1);
497 806 : if (expr == NULL)
498 0 : elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
499 : i + 1, RelationGetRelationName(rel));
500 :
501 : /*
502 : * If it's an update with a known set of update target columns,
503 : * see if we can skip the computation.
504 : */
505 806 : if (updatedCols)
506 : {
507 131 : Bitmapset *attrs_used = NULL;
508 :
509 131 : pull_varattnos((Node *) expr, 1, &attrs_used);
510 :
511 131 : if (!bms_overlap(updatedCols, attrs_used))
512 15 : continue; /* need not update this column */
513 : }
514 :
515 : /* No luck, so prepare the expression for execution */
516 791 : if (attgenerated == ATTRIBUTE_GENERATED_STORED)
517 : {
518 743 : ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
519 739 : ri_NumGeneratedNeeded++;
520 : }
521 :
522 : /* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
523 787 : if (cmdtype == CMD_UPDATE)
524 148 : resultRelInfo->ri_extraUpdatedCols =
525 148 : bms_add_member(resultRelInfo->ri_extraUpdatedCols,
526 : i + 1 - FirstLowInvalidHeapAttributeNumber);
527 : }
528 : }
529 :
530 755 : if (ri_NumGeneratedNeeded == 0)
531 : {
532 : /* didn't need it after all */
533 27 : pfree(ri_GeneratedExprs);
534 27 : ri_GeneratedExprs = NULL;
535 : }
536 :
537 : /* Save in appropriate set of fields */
538 755 : if (cmdtype == CMD_UPDATE)
539 : {
540 : /* Don't call twice */
541 : Assert(resultRelInfo->ri_GeneratedExprsU == NULL);
542 :
543 151 : resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
544 151 : resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;
545 :
546 151 : resultRelInfo->ri_extraUpdatedCols_valid = true;
547 : }
548 : else
549 : {
550 : /* Don't call twice */
551 : Assert(resultRelInfo->ri_GeneratedExprsI == NULL);
552 :
553 604 : resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
554 604 : resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
555 : }
556 :
557 755 : MemoryContextSwitchTo(oldContext);
558 : }
559 :
560 : /*
561 : * Compute stored generated columns for a tuple
562 : */
563 : void
564 1069 : ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
565 : EState *estate, TupleTableSlot *slot,
566 : CmdType cmdtype)
567 : {
568 1069 : Relation rel = resultRelInfo->ri_RelationDesc;
569 1069 : TupleDesc tupdesc = RelationGetDescr(rel);
570 1069 : int natts = tupdesc->natts;
571 1069 : ExprContext *econtext = GetPerTupleExprContext(estate);
572 : ExprState **ri_GeneratedExprs;
573 : MemoryContext oldContext;
574 : Datum *values;
575 : bool *nulls;
576 :
577 : /* We should not be called unless this is true */
578 : Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);
579 :
580 : /*
581 : * Initialize the expressions if we didn't already, and check whether we
582 : * can exit early because nothing needs to be computed.
583 : */
584 1069 : if (cmdtype == CMD_UPDATE)
585 : {
586 160 : if (resultRelInfo->ri_GeneratedExprsU == NULL)
587 119 : ExecInitGenerated(resultRelInfo, estate, cmdtype);
588 160 : if (resultRelInfo->ri_NumGeneratedNeededU == 0)
589 11 : return;
590 149 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
591 : }
592 : else
593 : {
594 909 : if (resultRelInfo->ri_GeneratedExprsI == NULL)
595 608 : ExecInitGenerated(resultRelInfo, estate, cmdtype);
596 : /* Early exit is impossible given the prior Assert */
597 : Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
598 905 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
599 : }
600 :
601 1054 : oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
602 :
603 1054 : values = palloc_array(Datum, natts);
604 1054 : nulls = palloc_array(bool, natts);
605 :
606 1054 : slot_getallattrs(slot);
607 1054 : memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);
608 :
609 4229 : for (int i = 0; i < natts; i++)
610 : {
611 3191 : CompactAttribute *attr = TupleDescCompactAttr(tupdesc, i);
612 :
613 3191 : if (ri_GeneratedExprs[i])
614 : {
615 : Datum val;
616 : bool isnull;
617 :
618 : Assert(TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED);
619 :
620 1067 : econtext->ecxt_scantuple = slot;
621 :
622 1067 : val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);
623 :
624 : /*
625 : * We must make a copy of val as we have no guarantees about where
626 : * memory for a pass-by-reference Datum is located.
627 : */
628 1051 : if (!isnull)
629 995 : val = datumCopy(val, attr->attbyval, attr->attlen);
630 :
631 1051 : values[i] = val;
632 1051 : nulls[i] = isnull;
633 : }
634 : else
635 : {
636 2124 : if (!nulls[i])
637 2030 : values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
638 : }
639 : }
640 :
641 1038 : ExecClearTuple(slot);
642 1038 : memcpy(slot->tts_values, values, sizeof(*values) * natts);
643 1038 : memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
644 1038 : ExecStoreVirtualTuple(slot);
645 1038 : ExecMaterializeSlot(slot);
646 :
647 1038 : MemoryContextSwitchTo(oldContext);
648 : }
649 :
650 : /*
651 : * ExecInitInsertProjection
652 : * Do one-time initialization of projection data for INSERT tuples.
653 : *
654 : * INSERT queries may need a projection to filter out junk attrs in the tlist.
655 : *
656 : * This is also a convenient place to verify that the
657 : * output of an INSERT matches the target table.
658 : */
659 : static void
660 55636 : ExecInitInsertProjection(ModifyTableState *mtstate,
661 : ResultRelInfo *resultRelInfo)
662 : {
663 55636 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
664 55636 : Plan *subplan = outerPlan(node);
665 55636 : EState *estate = mtstate->ps.state;
666 55636 : List *insertTargetList = NIL;
667 55636 : bool need_projection = false;
668 : ListCell *l;
669 :
670 : /* Extract non-junk columns of the subplan's result tlist. */
671 174527 : foreach(l, subplan->targetlist)
672 : {
673 118891 : TargetEntry *tle = (TargetEntry *) lfirst(l);
674 :
675 118891 : if (!tle->resjunk)
676 118891 : insertTargetList = lappend(insertTargetList, tle);
677 : else
678 0 : need_projection = true;
679 : }
680 :
681 : /*
682 : * The junk-free list must produce a tuple suitable for the result
683 : * relation.
684 : */
685 55636 : ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);
686 :
687 : /* We'll need a slot matching the table's format. */
688 55636 : resultRelInfo->ri_newTupleSlot =
689 55636 : table_slot_create(resultRelInfo->ri_RelationDesc,
690 : &estate->es_tupleTable);
691 :
692 : /* Build ProjectionInfo if needed (it probably isn't). */
693 55636 : if (need_projection)
694 : {
695 0 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
696 :
697 : /* need an expression context to do the projection */
698 0 : if (mtstate->ps.ps_ExprContext == NULL)
699 0 : ExecAssignExprContext(estate, &mtstate->ps);
700 :
701 0 : resultRelInfo->ri_projectNew =
702 0 : ExecBuildProjectionInfo(insertTargetList,
703 : mtstate->ps.ps_ExprContext,
704 : resultRelInfo->ri_newTupleSlot,
705 : &mtstate->ps,
706 : relDesc);
707 : }
708 :
709 55636 : resultRelInfo->ri_projectNewInfoValid = true;
710 55636 : }
711 :
712 : /*
713 : * ExecInitUpdateProjection
714 : * Do one-time initialization of projection data for UPDATE tuples.
715 : *
716 : * UPDATE always needs a projection, because (1) there's always some junk
717 : * attrs, and (2) we may need to merge values of not-updated columns from
718 : * the old tuple into the final tuple. In UPDATE, the tuple arriving from
719 : * the subplan contains only new values for the changed columns, plus row
720 : * identity info in the junk attrs.
721 : *
722 : * This is "one-time" for any given result rel, but we might touch more than
723 : * one result rel in the course of an inherited UPDATE, and each one needs
724 : * its own projection due to possible column order variation.
725 : *
726 : * This is also a convenient place to verify that the output of an UPDATE
727 : * matches the target table (ExecBuildUpdateProjection does that).
728 : */
729 : static void
730 8669 : ExecInitUpdateProjection(ModifyTableState *mtstate,
731 : ResultRelInfo *resultRelInfo)
732 : {
733 8669 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
734 8669 : Plan *subplan = outerPlan(node);
735 8669 : EState *estate = mtstate->ps.state;
736 8669 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
737 : int whichrel;
738 : List *updateColnos;
739 :
740 : /*
741 : * Usually, mt_lastResultIndex matches the target rel. If it happens not
742 : * to, we can get the index the hard way with an integer division.
743 : */
744 8669 : whichrel = mtstate->mt_lastResultIndex;
745 8669 : if (resultRelInfo != mtstate->resultRelInfo + whichrel)
746 : {
747 0 : whichrel = resultRelInfo - mtstate->resultRelInfo;
748 : Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
749 : }
750 :
751 8669 : updateColnos = (List *) list_nth(mtstate->mt_updateColnosLists, whichrel);
752 :
753 : /*
754 : * For UPDATE, we use the old tuple to fill up missing values in the tuple
755 : * produced by the subplan to get the new tuple. We need two slots, both
756 : * matching the table's desired format.
757 : */
758 8669 : resultRelInfo->ri_oldTupleSlot =
759 8669 : table_slot_create(resultRelInfo->ri_RelationDesc,
760 : &estate->es_tupleTable);
761 8669 : resultRelInfo->ri_newTupleSlot =
762 8669 : table_slot_create(resultRelInfo->ri_RelationDesc,
763 : &estate->es_tupleTable);
764 :
765 : /* need an expression context to do the projection */
766 8669 : if (mtstate->ps.ps_ExprContext == NULL)
767 7408 : ExecAssignExprContext(estate, &mtstate->ps);
768 :
769 8669 : resultRelInfo->ri_projectNew =
770 8669 : ExecBuildUpdateProjection(subplan->targetlist,
771 : false, /* subplan did the evaluation */
772 : updateColnos,
773 : relDesc,
774 : mtstate->ps.ps_ExprContext,
775 : resultRelInfo->ri_newTupleSlot,
776 : &mtstate->ps);
777 :
778 8669 : resultRelInfo->ri_projectNewInfoValid = true;
779 8669 : }
780 :
781 : /*
782 : * ExecGetInsertNewTuple
783 : * This prepares a "new" tuple ready to be inserted into given result
784 : * relation, by removing any junk columns of the plan's output tuple
785 : * and (if necessary) coercing the tuple to the right tuple format.
786 : */
787 : static TupleTableSlot *
788 7735352 : ExecGetInsertNewTuple(ResultRelInfo *relinfo,
789 : TupleTableSlot *planSlot)
790 : {
791 7735352 : ProjectionInfo *newProj = relinfo->ri_projectNew;
792 : ExprContext *econtext;
793 :
794 : /*
795 : * If there's no projection to be done, just make sure the slot is of the
796 : * right type for the target rel. If the planSlot is the right type we
797 : * can use it as-is, else copy the data into ri_newTupleSlot.
798 : */
799 7735352 : if (newProj == NULL)
800 : {
801 7735352 : if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
802 : {
803 7222324 : ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
804 7222324 : return relinfo->ri_newTupleSlot;
805 : }
806 : else
807 513028 : return planSlot;
808 : }
809 :
810 : /*
811 : * Else project; since the projection output slot is ri_newTupleSlot, this
812 : * will also fix any slot-type problem.
813 : *
814 : * Note: currently, this is dead code, because INSERT cases don't receive
815 : * any junk columns so there's never a projection to be done.
816 : */
817 0 : econtext = newProj->pi_exprContext;
818 0 : econtext->ecxt_outertuple = planSlot;
819 0 : return ExecProject(newProj);
820 : }
821 :
822 : /*
823 : * ExecGetUpdateNewTuple
824 : * This prepares a "new" tuple by combining an UPDATE subplan's output
825 : * tuple (which contains values of changed columns) with unchanged
826 : * columns taken from the old tuple.
827 : *
828 : * The subplan tuple might also contain junk columns, which are ignored.
829 : * Note that the projection also ensures we have a slot of the right type.
830 : */
831 : TupleTableSlot *
832 2211466 : ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
833 : TupleTableSlot *planSlot,
834 : TupleTableSlot *oldSlot)
835 : {
836 2211466 : ProjectionInfo *newProj = relinfo->ri_projectNew;
837 : ExprContext *econtext;
838 :
839 : /* Use a few extra Asserts to protect against outside callers */
840 : Assert(relinfo->ri_projectNewInfoValid);
841 : Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
842 : Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));
843 :
844 2211466 : econtext = newProj->pi_exprContext;
845 2211466 : econtext->ecxt_outertuple = planSlot;
846 2211466 : econtext->ecxt_scantuple = oldSlot;
847 2211466 : return ExecProject(newProj);
848 : }
849 :
850 : /* ----------------------------------------------------------------
851 : * ExecInsert
852 : *
853 : * For INSERT, we have to insert the tuple into the target relation
854 : * (or partition thereof) and insert appropriate tuples into the index
855 : * relations.
856 : *
857 : * slot contains the new tuple value to be stored.
858 : *
859 : * Returns RETURNING result if any, otherwise NULL.
860 : * *inserted_tuple is the tuple that's effectively inserted;
861 : * *insert_destrel is the relation where it was inserted.
862 : * These are only set on success.
863 : *
864 : * This may change the currently active tuple conversion map in
865 : * mtstate->mt_transition_capture, so the callers must take care to
866 : * save the previous value to avoid losing track of it.
867 : * ----------------------------------------------------------------
868 : */
869 : static TupleTableSlot *
870 7738282 : ExecInsert(ModifyTableContext *context,
871 : ResultRelInfo *resultRelInfo,
872 : TupleTableSlot *slot,
873 : bool canSetTag,
874 : TupleTableSlot **inserted_tuple,
875 : ResultRelInfo **insert_destrel)
876 : {
877 7738282 : ModifyTableState *mtstate = context->mtstate;
878 7738282 : EState *estate = context->estate;
879 : Relation resultRelationDesc;
880 7738282 : List *recheckIndexes = NIL;
881 7738282 : TupleTableSlot *planSlot = context->planSlot;
882 7738282 : TupleTableSlot *result = NULL;
883 : TransitionCaptureState *ar_insert_trig_tcs;
884 7738282 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
885 7738282 : OnConflictAction onconflict = node->onConflictAction;
886 7738282 : PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
887 : MemoryContext oldContext;
888 :
889 : /*
890 : * If the input result relation is a partitioned table, find the leaf
891 : * partition to insert the tuple into.
892 : */
893 7738282 : if (proute)
894 : {
895 : ResultRelInfo *partRelInfo;
896 :
897 480974 : slot = ExecPrepareTupleRouting(mtstate, estate, proute,
898 : resultRelInfo, slot,
899 : &partRelInfo);
900 480830 : resultRelInfo = partRelInfo;
901 : }
902 :
903 7738138 : ExecMaterializeSlot(slot);
904 :
905 7738138 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
906 :
907 : /*
908 : * Open the table's indexes, if we have not done so already, so that we
909 : * can add new index entries for the inserted tuple.
910 : */
911 7738138 : if (resultRelationDesc->rd_rel->relhasindex &&
912 2367490 : resultRelInfo->ri_IndexRelationDescs == NULL)
913 20480 : ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);
914 :
915 : /*
916 : * BEFORE ROW INSERT Triggers.
917 : *
918 : * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
919 : * INSERT ... ON CONFLICT statement. We cannot check for constraint
920 : * violations before firing these triggers, because they can change the
921 : * values to insert. Also, they can run arbitrary user-defined code with
922 : * side-effects that we can't cancel by just not inserting the tuple.
923 : */
924 7738138 : if (resultRelInfo->ri_TrigDesc &&
925 452144 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)
926 : {
927 : /* Flush any pending inserts, so rows are visible to the triggers */
928 1418 : if (estate->es_insert_pending_result_relations != NIL)
929 3 : ExecPendingInserts(estate);
930 :
931 1418 : if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
932 131 : return NULL; /* "do nothing" */
933 : }
934 :
935 : /* INSTEAD OF ROW INSERT Triggers */
936 7737945 : if (resultRelInfo->ri_TrigDesc &&
937 451951 : resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
938 : {
939 111 : if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
940 4 : return NULL; /* "do nothing" */
941 : }
942 7737834 : else if (resultRelInfo->ri_FdwRoutine)
943 : {
944 : /*
945 : * GENERATED expressions might reference the tableoid column, so
946 : * (re-)initialize tts_tableOid before evaluating them.
947 : */
948 1010 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
949 :
950 : /*
951 : * Compute stored generated columns
952 : */
953 1010 : if (resultRelationDesc->rd_att->constr &&
954 179 : resultRelationDesc->rd_att->constr->has_generated_stored)
955 4 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
956 : CMD_INSERT);
957 :
958 : /*
959 : * If the FDW supports batching, and batching is requested, accumulate
960 : * rows and insert them in batches. Otherwise use the per-row inserts.
961 : */
962 1010 : if (resultRelInfo->ri_BatchSize > 1)
963 : {
964 145 : bool flushed = false;
965 :
966 : /*
967 : * When we've reached the desired batch size, perform the
968 : * insertion.
969 : */
970 145 : if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
971 : {
972 10 : ExecBatchInsert(mtstate, resultRelInfo,
973 : resultRelInfo->ri_Slots,
974 : resultRelInfo->ri_PlanSlots,
975 : resultRelInfo->ri_NumSlots,
976 : estate, canSetTag);
977 10 : flushed = true;
978 : }
979 :
980 145 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
981 :
982 145 : if (resultRelInfo->ri_Slots == NULL)
983 : {
984 15 : resultRelInfo->ri_Slots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
985 15 : resultRelInfo->ri_PlanSlots = palloc_array(TupleTableSlot *, resultRelInfo->ri_BatchSize);
986 : }
987 :
988 : /*
989 : * Initialize the batch slots. We don't know how many slots will
990 : * be needed, so we initialize them as the batch grows, and we
991 : * keep them across batches. To mitigate an inefficiency in how
992 : * resource owner handles objects with many references (as with
993 : * many slots all referencing the same tuple descriptor) we copy
994 : * the appropriate tuple descriptor for each slot.
995 : */
996 145 : if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
997 : {
998 72 : TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
999 : TupleDesc plan_tdesc =
1000 72 : CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
1001 :
1002 144 : resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
1003 72 : MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
1004 :
1005 144 : resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
1006 72 : MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
1007 :
1008 : /* remember how many batch slots we initialized */
1009 72 : resultRelInfo->ri_NumSlotsInitialized++;
1010 : }
1011 :
1012 145 : ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
1013 : slot);
1014 :
1015 145 : ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
1016 : planSlot);
1017 :
1018 : /*
1019 : * If these are the first tuples stored in the buffers, add the
1020 : * target rel and the mtstate to the
1021 : * es_insert_pending_result_relations and
1022 : * es_insert_pending_modifytables lists respectively, except in
1023 : * the case where flushing was done above, in which case they
1024 : * would already have been added to the lists, so no need to do
1025 : * this.
1026 : */
1027 145 : if (resultRelInfo->ri_NumSlots == 0 && !flushed)
1028 : {
1029 : Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
1030 : resultRelInfo));
1031 19 : estate->es_insert_pending_result_relations =
1032 19 : lappend(estate->es_insert_pending_result_relations,
1033 : resultRelInfo);
1034 19 : estate->es_insert_pending_modifytables =
1035 19 : lappend(estate->es_insert_pending_modifytables, mtstate);
1036 : }
1037 : Assert(list_member_ptr(estate->es_insert_pending_result_relations,
1038 : resultRelInfo));
1039 :
1040 145 : resultRelInfo->ri_NumSlots++;
1041 :
1042 145 : MemoryContextSwitchTo(oldContext);
1043 :
1044 145 : return NULL;
1045 : }
1046 :
1047 : /*
1048 : * insert into foreign table: let the FDW do it
1049 : */
1050 865 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
1051 : resultRelInfo,
1052 : slot,
1053 : planSlot);
1054 :
1055 862 : if (slot == NULL) /* "do nothing" */
1056 2 : return NULL;
1057 :
1058 : /*
1059 : * AFTER ROW Triggers or RETURNING expressions might reference the
1060 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
1061 : * them. (This covers the case where the FDW replaced the slot.)
1062 : */
1063 860 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1064 : }
1065 : else
1066 : {
1067 : WCOKind wco_kind;
1068 :
1069 : /*
1070 : * Constraints and GENERATED expressions might reference the tableoid
1071 : * column, so (re-)initialize tts_tableOid before evaluating them.
1072 : */
1073 7736824 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1074 :
1075 : /*
1076 : * Compute stored generated columns
1077 : */
1078 7736824 : if (resultRelationDesc->rd_att->constr &&
1079 2523349 : resultRelationDesc->rd_att->constr->has_generated_stored)
1080 880 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
1081 : CMD_INSERT);
1082 :
1083 : /*
1084 : * Check any RLS WITH CHECK policies.
1085 : *
1086 : * Normally we should check INSERT policies. But if the insert is the
1087 : * result of a partition key update that moved the tuple to a new
1088 : * partition, we should instead check UPDATE policies, because we are
1089 : * executing policies defined on the target table, and not those
1090 : * defined on the child partitions.
1091 : *
1092 : * If we're running MERGE, we refer to the action that we're executing
1093 : * to know if we're doing an INSERT or UPDATE to a partition table.
1094 : */
1095 7736804 : if (mtstate->operation == CMD_UPDATE)
1096 549 : wco_kind = WCO_RLS_UPDATE_CHECK;
1097 7736255 : else if (mtstate->operation == CMD_MERGE)
1098 1181 : wco_kind = (mtstate->mt_merge_action->mas_action->commandType == CMD_UPDATE) ?
1099 1181 : WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
1100 : else
1101 7735074 : wco_kind = WCO_RLS_INSERT_CHECK;
1102 :
1103 : /*
1104 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
1105 : * we are looking for at this point.
1106 : */
1107 7736804 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1108 474 : ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
1109 :
1110 : /*
1111 : * Check the constraints of the tuple.
1112 : */
1113 7736676 : if (resultRelationDesc->rd_att->constr)
1114 2523257 : ExecConstraints(resultRelInfo, slot, estate);
1115 :
1116 : /*
1117 : * Also check the tuple against the partition constraint, if there is
1118 : * one; except that if we got here via tuple-routing, we don't need to
1119 : * if there's no BR trigger defined on the partition.
1120 : */
1121 7736159 : if (resultRelationDesc->rd_rel->relispartition &&
1122 483314 : (resultRelInfo->ri_RootResultRelInfo == NULL ||
1123 480470 : (resultRelInfo->ri_TrigDesc &&
1124 1113 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
1125 2982 : ExecPartitionCheck(resultRelInfo, slot, estate, true);
1126 :
1127 7736047 : if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
1128 2215 : {
1129 : /* Perform a speculative insertion. */
1130 : uint32 specToken;
1131 : ItemPointerData conflictTid;
1132 : ItemPointerData invalidItemPtr;
1133 : bool specConflict;
1134 : List *arbiterIndexes;
1135 :
1136 5291 : ItemPointerSetInvalid(&invalidItemPtr);
1137 5291 : arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
1138 :
1139 : /*
1140 : * Do a non-conclusive check for conflicts first.
1141 : *
1142 : * We're not holding any locks yet, so this doesn't guarantee that
1143 : * the later insert won't conflict. But it avoids leaving behind
1144 : * a lot of canceled speculative insertions, if you run a lot of
1145 : * INSERT ON CONFLICT statements that do conflict.
1146 : *
1147 : * We loop back here if we find a conflict below, either during
1148 : * the pre-check, or when we re-check after inserting the tuple
1149 : * speculatively. Better allow interrupts in case some bug makes
1150 : * this an infinite loop.
1151 : */
1152 14 : vlock:
1153 5305 : CHECK_FOR_INTERRUPTS();
1154 5305 : specConflict = false;
1155 5305 : if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
1156 : &conflictTid, &invalidItemPtr,
1157 : arbiterIndexes))
1158 : {
1159 : /* committed conflict tuple found */
1160 3071 : if (onconflict == ONCONFLICT_UPDATE)
1161 : {
1162 : /*
1163 : * In case of ON CONFLICT DO UPDATE, execute the UPDATE
1164 : * part. Be prepared to retry if the UPDATE fails because
1165 : * of another concurrent UPDATE/DELETE to the conflict
1166 : * tuple.
1167 : */
1168 2740 : TupleTableSlot *returning = NULL;
1169 :
1170 2740 : if (ExecOnConflictUpdate(context, resultRelInfo,
1171 : &conflictTid, slot, canSetTag,
1172 : &returning))
1173 : {
1174 2685 : InstrCountTuples2(&mtstate->ps, 1);
1175 2685 : return returning;
1176 : }
1177 : else
1178 3 : goto vlock;
1179 : }
1180 331 : else if (onconflict == ONCONFLICT_SELECT)
1181 : {
1182 : /*
1183 : * In case of ON CONFLICT DO SELECT, optionally lock the
1184 : * conflicting tuple, fetch it and project RETURNING on
1185 : * it. Be prepared to retry if locking fails because of a
1186 : * concurrent UPDATE/DELETE to the conflict tuple.
1187 : */
1188 192 : TupleTableSlot *returning = NULL;
1189 :
1190 192 : if (ExecOnConflictSelect(context, resultRelInfo,
1191 : &conflictTid, slot, canSetTag,
1192 : &returning))
1193 : {
1194 176 : InstrCountTuples2(&mtstate->ps, 1);
1195 176 : return returning;
1196 : }
1197 : else
1198 0 : goto vlock;
1199 : }
1200 : else
1201 : {
1202 : /*
1203 : * In case of ON CONFLICT DO NOTHING, do nothing. However,
1204 : * verify that the tuple is visible to the executor's MVCC
1205 : * snapshot at higher isolation levels.
1206 : *
1207 : * Using ExecGetReturningSlot() to store the tuple for the
1208 : * recheck isn't that pretty, but we can't trivially use
1209 : * the input slot, because it might not be of a compatible
1210 : * type. As there's no conflicting usage of
1211 : * ExecGetReturningSlot() in the DO NOTHING case...
1212 : */
1213 : Assert(onconflict == ONCONFLICT_NOTHING);
1214 139 : ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
1215 : ExecGetReturningSlot(estate, resultRelInfo));
1216 129 : InstrCountTuples2(&mtstate->ps, 1);
1217 129 : return NULL;
1218 : }
1219 : }
1220 :
1221 : /*
1222 : * Before we start insertion proper, acquire our "speculative
1223 : * insertion lock". Others can use that to wait for us to decide
1224 : * if we're going to go ahead with the insertion, instead of
1225 : * waiting for the whole transaction to complete.
1226 : */
1227 2230 : INJECTION_POINT("exec-insert-before-insert-speculative", NULL);
1228 2230 : specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
1229 :
1230 : /* insert the tuple, with the speculative token */
1231 2230 : table_tuple_insert_speculative(resultRelationDesc, slot,
1232 : estate->es_output_cid,
1233 : 0,
1234 : NULL,
1235 : specToken);
1236 :
1237 : /* insert index entries for tuple */
1238 2230 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1239 : estate, EIIT_NO_DUPE_ERROR,
1240 : slot, arbiterIndexes,
1241 : &specConflict);
1242 :
1243 : /* adjust the tuple's state accordingly */
1244 2226 : table_tuple_complete_speculative(resultRelationDesc, slot,
1245 2226 : specToken, !specConflict);
1246 :
1247 : /*
1248 : * Wake up anyone waiting for our decision. They will re-check
1249 : * the tuple, see that it's no longer speculative, and wait on our
1250 : * XID as if this was a regularly inserted tuple all along. Or if
1251 : * we killed the tuple, they will see it's dead, and proceed as if
1252 : * the tuple never existed.
1253 : */
1254 2226 : SpeculativeInsertionLockRelease(GetCurrentTransactionId());
1255 :
1256 : /*
1257 : * If there was a conflict, start from the beginning. We'll do
1258 : * the pre-check again, which will now find the conflicting tuple
1259 : * (unless it aborts before we get there).
1260 : */
1261 2226 : if (specConflict)
1262 : {
1263 11 : list_free(recheckIndexes);
1264 11 : goto vlock;
1265 : }
1266 :
1267 : /* Since there was no insertion conflict, we're done */
1268 : }
1269 : else
1270 : {
1271 : /* insert the tuple normally */
1272 7730756 : table_tuple_insert(resultRelationDesc, slot,
1273 : estate->es_output_cid,
1274 : 0, NULL);
1275 :
1276 : /* insert index entries for tuple */
1277 7730738 : if (resultRelInfo->ri_NumIndices > 0)
1278 2361818 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo, estate,
1279 : 0, slot, NIL,
1280 : NULL);
1281 : }
1282 : }
1283 :
1284 7733541 : if (canSetTag)
1285 7731717 : (estate->es_processed)++;
1286 :
1287 : /*
1288 : * If this insert is the result of a partition key update that moved the
1289 : * tuple to a new partition, put this row into the transition NEW TABLE,
1290 : * if there is one. We need to do this separately for DELETE and INSERT
1291 : * because they happen on different tables.
1292 : */
1293 7733541 : ar_insert_trig_tcs = mtstate->mt_transition_capture;
1294 7733541 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
1295 36 : && mtstate->mt_transition_capture->tcs_update_new_table)
1296 : {
1297 32 : ExecARUpdateTriggers(estate, resultRelInfo,
1298 : NULL, NULL,
1299 : NULL,
1300 : NULL,
1301 : slot,
1302 : NULL,
1303 32 : mtstate->mt_transition_capture,
1304 : false);
1305 :
1306 : /*
1307 : * We've already captured the NEW TABLE row, so make sure any AR
1308 : * INSERT trigger fired below doesn't capture it again.
1309 : */
1310 32 : ar_insert_trig_tcs = NULL;
1311 : }
1312 :
1313 : /* AFTER ROW INSERT Triggers */
1314 7733541 : ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
1315 : ar_insert_trig_tcs);
1316 :
1317 7733540 : list_free(recheckIndexes);
1318 :
1319 : /*
1320 : * Check any WITH CHECK OPTION constraints from parent views. We are
1321 : * required to do this after testing all constraints and uniqueness
1322 : * violations per the SQL spec, so we do it after actually inserting the
1323 : * record into the heap and all indexes.
1324 : *
1325 : * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
1326 : * tuple will never be seen, if it violates the WITH CHECK OPTION.
1327 : *
1328 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1329 : * are looking for at this point.
1330 : */
1331 7733540 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1332 294 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1333 :
1334 : /* Process RETURNING if present */
1335 7733444 : if (resultRelInfo->ri_projectReturning)
1336 : {
1337 2768 : TupleTableSlot *oldSlot = NULL;
1338 :
1339 : /*
1340 : * If this is part of a cross-partition UPDATE, and the RETURNING list
1341 : * refers to any OLD columns, ExecDelete() will have saved the tuple
1342 : * deleted from the original partition, which we must use here to
1343 : * compute the OLD column values. Otherwise, all OLD column values
1344 : * will be NULL.
1345 : */
1346 2768 : if (context->cpDeletedSlot)
1347 : {
1348 : TupleConversionMap *tupconv_map;
1349 :
1350 : /*
1351 : * Convert the OLD tuple to the new partition's format/slot, if
1352 : * needed. Note that ExecDelete() already converted it to the
1353 : * root's partition's format/slot.
1354 : */
1355 30 : oldSlot = context->cpDeletedSlot;
1356 30 : tupconv_map = ExecGetRootToChildMap(resultRelInfo, estate);
1357 30 : if (tupconv_map != NULL)
1358 : {
1359 10 : oldSlot = execute_attr_map_slot(tupconv_map->attrMap,
1360 : oldSlot,
1361 : ExecGetReturningSlot(estate,
1362 : resultRelInfo));
1363 :
1364 10 : oldSlot->tts_tableOid = context->cpDeletedSlot->tts_tableOid;
1365 10 : ItemPointerCopy(&context->cpDeletedSlot->tts_tid, &oldSlot->tts_tid);
1366 : }
1367 : }
1368 :
1369 2768 : result = ExecProcessReturning(context, resultRelInfo, false,
1370 : oldSlot, slot, planSlot);
1371 :
1372 : /*
1373 : * For a cross-partition UPDATE, release the old tuple, first making
1374 : * sure that the result slot has a local copy of any pass-by-reference
1375 : * values.
1376 : */
1377 2760 : if (context->cpDeletedSlot)
1378 : {
1379 30 : ExecMaterializeSlot(result);
1380 30 : ExecClearTuple(oldSlot);
1381 30 : if (context->cpDeletedSlot != oldSlot)
1382 10 : ExecClearTuple(context->cpDeletedSlot);
1383 30 : context->cpDeletedSlot = NULL;
1384 : }
1385 : }
1386 :
1387 7733436 : if (inserted_tuple)
1388 565 : *inserted_tuple = slot;
1389 7733436 : if (insert_destrel)
1390 565 : *insert_destrel = resultRelInfo;
1391 :
1392 7733436 : return result;
1393 : }
1394 :
1395 : /* ----------------------------------------------------------------
1396 : * ExecForPortionOfLeftovers
1397 : *
1398 : * Insert tuples for the untouched portion of a row in a FOR
1399 : * PORTION OF UPDATE/DELETE
1400 : * ----------------------------------------------------------------
1401 : */
1402 : static void
1403 783 : ExecForPortionOfLeftovers(ModifyTableContext *context,
1404 : EState *estate,
1405 : ResultRelInfo *resultRelInfo,
1406 : ItemPointer tupleid)
1407 : {
1408 783 : ModifyTableState *mtstate = context->mtstate;
1409 783 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
1410 783 : ForPortionOfExpr *forPortionOf = (ForPortionOfExpr *) node->forPortionOf;
1411 : AttrNumber rangeAttno;
1412 : Datum oldRange;
1413 : TypeCacheEntry *typcache;
1414 : ForPortionOfState *fpoState;
1415 : TupleTableSlot *oldtupleSlot;
1416 : TupleTableSlot *leftoverSlot;
1417 783 : TupleConversionMap *map = NULL;
1418 783 : HeapTuple oldtuple = NULL;
1419 : CmdType oldOperation;
1420 : TransitionCaptureState *oldTcs;
1421 : FmgrInfo flinfo;
1422 : ReturnSetInfo rsi;
1423 783 : bool didInit = false;
1424 783 : bool shouldFree = false;
1425 :
1426 783 : LOCAL_FCINFO(fcinfo, 2);
1427 :
1428 783 : if (!resultRelInfo->ri_forPortionOf)
1429 : {
1430 : /*
1431 : * If we don't have a ForPortionOfState yet, we must be a partition
1432 : * child being hit for the first time. Make a copy from the root, with
1433 : * our own tupleTableSlot. We do this lazily so that we don't pay the
1434 : * price of unused partitions.
1435 : */
1436 56 : ForPortionOfState *leafState = makeNode(ForPortionOfState);
1437 :
1438 56 : if (!mtstate->rootResultRelInfo)
1439 0 : elog(ERROR, "no root relation but ri_forPortionOf is uninitialized");
1440 :
1441 56 : fpoState = mtstate->rootResultRelInfo->ri_forPortionOf;
1442 : Assert(fpoState);
1443 :
1444 56 : leafState->fp_rangeName = fpoState->fp_rangeName;
1445 56 : leafState->fp_rangeType = fpoState->fp_rangeType;
1446 56 : leafState->fp_rangeAttno = fpoState->fp_rangeAttno;
1447 56 : leafState->fp_targetRange = fpoState->fp_targetRange;
1448 56 : leafState->fp_Leftover = fpoState->fp_Leftover;
1449 : /* Each partition needs a slot matching its tuple descriptor */
1450 56 : leafState->fp_Existing =
1451 56 : table_slot_create(resultRelInfo->ri_RelationDesc,
1452 56 : &mtstate->ps.state->es_tupleTable);
1453 :
1454 56 : resultRelInfo->ri_forPortionOf = leafState;
1455 : }
1456 783 : fpoState = resultRelInfo->ri_forPortionOf;
1457 783 : oldtupleSlot = fpoState->fp_Existing;
1458 783 : leftoverSlot = fpoState->fp_Leftover;
1459 :
1460 : /*
1461 : * Get the old pre-UPDATE/DELETE tuple. We will use its range to compute
1462 : * untouched parts of history, and if necessary we will insert copies with
1463 : * truncated start/end times.
1464 : *
1465 : * We have already locked the tuple in ExecUpdate/ExecDelete, and it has
1466 : * passed EvalPlanQual. This ensures that concurrent updates in READ
1467 : * COMMITTED can't insert conflicting temporal leftovers.
1468 : *
1469 : * It does *not* protect against concurrent update/deletes overlooking
1470 : * each others' leftovers though. See our isolation tests for details
1471 : * about that and a viable workaround.
1472 : */
1473 783 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc, tupleid, SnapshotAny, oldtupleSlot))
1474 0 : elog(ERROR, "failed to fetch tuple for FOR PORTION OF");
1475 :
1476 : /*
1477 : * Get the old range of the record being updated/deleted. Must read with
1478 : * the attno of the leaf partition being updated.
1479 : */
1480 :
1481 783 : rangeAttno = forPortionOf->rangeVar->varattno;
1482 783 : if (resultRelInfo->ri_RootResultRelInfo)
1483 64 : map = ExecGetChildToRootMap(resultRelInfo);
1484 783 : if (map != NULL)
1485 16 : rangeAttno = map->attrMap->attnums[rangeAttno - 1];
1486 783 : slot_getallattrs(oldtupleSlot);
1487 :
1488 783 : if (oldtupleSlot->tts_isnull[rangeAttno - 1])
1489 0 : elog(ERROR, "found a NULL range in a temporal table");
1490 783 : oldRange = oldtupleSlot->tts_values[rangeAttno - 1];
1491 :
1492 : /*
1493 : * Get the range's type cache entry. This is worth caching for the whole
1494 : * UPDATE/DELETE as range functions do.
1495 : */
1496 :
1497 783 : typcache = fpoState->fp_leftoverstypcache;
1498 783 : if (typcache == NULL)
1499 : {
1500 648 : typcache = lookup_type_cache(forPortionOf->rangeType, 0);
1501 648 : fpoState->fp_leftoverstypcache = typcache;
1502 : }
1503 :
1504 : /*
1505 : * Get the ranges to the left/right of the targeted range. We call a SETOF
1506 : * support function and insert as many temporal leftovers as it gives us.
1507 : * Although rangetypes have 0/1/2 leftovers, multiranges have 0/1, and
1508 : * other types may have more.
1509 : */
1510 :
1511 783 : fmgr_info(forPortionOf->withoutPortionProc, &flinfo);
1512 783 : rsi.type = T_ReturnSetInfo;
1513 783 : rsi.econtext = mtstate->ps.ps_ExprContext;
1514 783 : rsi.expectedDesc = NULL;
1515 783 : rsi.allowedModes = (int) (SFRM_ValuePerCall);
1516 783 : rsi.returnMode = SFRM_ValuePerCall;
1517 783 : rsi.setResult = NULL;
1518 783 : rsi.setDesc = NULL;
1519 :
1520 783 : InitFunctionCallInfoData(*fcinfo, &flinfo, 2, InvalidOid, NULL, (Node *) &rsi);
1521 783 : fcinfo->args[0].value = oldRange;
1522 783 : fcinfo->args[0].isnull = false;
1523 783 : fcinfo->args[1].value = fpoState->fp_targetRange;
1524 783 : fcinfo->args[1].isnull = false;
1525 :
1526 : /*
1527 : * If there are partitions, we must insert into the root table, so we get
1528 : * tuple routing. We already set up leftoverSlot with the root tuple
1529 : * descriptor.
1530 : */
1531 783 : if (resultRelInfo->ri_RootResultRelInfo)
1532 64 : resultRelInfo = resultRelInfo->ri_RootResultRelInfo;
1533 :
1534 : /*
1535 : * Insert a leftover for each value returned by the without_portion helper
1536 : * function
1537 : */
1538 : while (true)
1539 1028 : {
1540 1811 : Datum leftover = FunctionCallInvoke(fcinfo);
1541 :
1542 : /* Are we done? */
1543 1811 : if (rsi.isDone == ExprEndResult)
1544 751 : break;
1545 :
1546 1060 : if (fcinfo->isnull)
1547 0 : elog(ERROR, "Got a null from without_portion function");
1548 :
1549 : /*
1550 : * Does the new Datum violate domain checks? Row-level CHECK
1551 : * constraints are validated by ExecInsert, so we don't need to do
1552 : * anything here for those.
1553 : */
1554 1060 : if (forPortionOf->isDomain)
1555 80 : domain_check(leftover, false, forPortionOf->rangeVar->vartype, NULL, NULL);
1556 :
1557 1044 : if (!didInit)
1558 : {
1559 : /*
1560 : * Make a copy of the pre-UPDATE row. Then we'll overwrite the
1561 : * range column below. Convert oldtuple to the base table's format
1562 : * if necessary. We need to insert temporal leftovers through the
1563 : * root partition so they get routed correctly.
1564 : */
1565 679 : if (map != NULL)
1566 : {
1567 16 : leftoverSlot = execute_attr_map_slot(map->attrMap,
1568 : oldtupleSlot,
1569 : leftoverSlot);
1570 : }
1571 : else
1572 : {
1573 663 : oldtuple = ExecFetchSlotHeapTuple(oldtupleSlot, false, &shouldFree);
1574 663 : ExecForceStoreHeapTuple(oldtuple, leftoverSlot, false);
1575 : }
1576 :
1577 : /*
1578 : * Save some mtstate things so we can restore them below. XXX:
1579 : * Should we create our own ModifyTableState instead?
1580 : */
1581 679 : oldOperation = mtstate->operation;
1582 679 : mtstate->operation = CMD_INSERT;
1583 679 : oldTcs = mtstate->mt_transition_capture;
1584 :
1585 679 : didInit = true;
1586 : }
1587 :
1588 1044 : leftoverSlot->tts_values[forPortionOf->rangeVar->varattno - 1] = leftover;
1589 1044 : leftoverSlot->tts_isnull[forPortionOf->rangeVar->varattno - 1] = false;
1590 1044 : ExecMaterializeSlot(leftoverSlot);
1591 :
1592 : /*
1593 : * The standard says that each temporal leftover should execute its
1594 : * own INSERT statement, firing all statement and row triggers, but
1595 : * skipping insert permission checks. Therefore we give each insert
1596 : * its own transition table. If we just push & pop a new trigger level
1597 : * for each insert, we get exactly what we need.
1598 : *
1599 : * We have to make sure that the inserts don't add to the ROW_COUNT
1600 : * diagnostic or the command tag, so we pass false for canSetTag.
1601 : */
1602 1044 : AfterTriggerBeginQuery();
1603 1044 : ExecSetupTransitionCaptureState(mtstate, estate);
1604 1044 : fireBSTriggers(mtstate);
1605 1044 : ExecInsert(context, resultRelInfo, leftoverSlot, false, NULL, NULL);
1606 1028 : fireASTriggers(mtstate);
1607 1028 : AfterTriggerEndQuery(estate);
1608 : }
1609 :
1610 751 : if (didInit)
1611 : {
1612 663 : mtstate->operation = oldOperation;
1613 663 : mtstate->mt_transition_capture = oldTcs;
1614 :
1615 663 : if (shouldFree)
1616 0 : heap_freetuple(oldtuple);
1617 : }
1618 751 : }
1619 :
1620 : /* ----------------------------------------------------------------
1621 : * ExecBatchInsert
1622 : *
1623 : * Insert multiple tuples in an efficient way.
1624 : * Currently, this handles inserting into a foreign table without
1625 : * RETURNING clause.
1626 : * ----------------------------------------------------------------
1627 : */
1628 : static void
1629 29 : ExecBatchInsert(ModifyTableState *mtstate,
1630 : ResultRelInfo *resultRelInfo,
1631 : TupleTableSlot **slots,
1632 : TupleTableSlot **planSlots,
1633 : int numSlots,
1634 : EState *estate,
1635 : bool canSetTag)
1636 : {
1637 : int i;
1638 29 : int numInserted = numSlots;
1639 29 : TupleTableSlot *slot = NULL;
1640 : TupleTableSlot **rslots;
1641 :
1642 : /*
1643 : * insert into foreign table: let the FDW do it
1644 : */
1645 29 : rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1646 : resultRelInfo,
1647 : slots,
1648 : planSlots,
1649 : &numInserted);
1650 :
1651 173 : for (i = 0; i < numInserted; i++)
1652 : {
1653 145 : slot = rslots[i];
1654 :
1655 : /*
1656 : * AFTER ROW Triggers might reference the tableoid column, so
1657 : * (re-)initialize tts_tableOid before evaluating them.
1658 : */
1659 145 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1660 :
1661 : /* AFTER ROW INSERT Triggers */
1662 145 : ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1663 145 : mtstate->mt_transition_capture);
1664 :
1665 : /*
1666 : * Check any WITH CHECK OPTION constraints from parent views. See the
1667 : * comment in ExecInsert.
1668 : */
1669 144 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1670 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1671 : }
1672 :
1673 28 : if (canSetTag && numInserted > 0)
1674 28 : estate->es_processed += numInserted;
1675 :
1676 : /* Clean up all the slots, ready for the next batch */
1677 172 : for (i = 0; i < numSlots; i++)
1678 : {
1679 144 : ExecClearTuple(slots[i]);
1680 144 : ExecClearTuple(planSlots[i]);
1681 : }
1682 28 : resultRelInfo->ri_NumSlots = 0;
1683 28 : }
1684 :
1685 : /*
1686 : * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1687 : */
1688 : static void
1689 18 : ExecPendingInserts(EState *estate)
1690 : {
1691 : ListCell *l1,
1692 : *l2;
1693 :
1694 36 : forboth(l1, estate->es_insert_pending_result_relations,
1695 : l2, estate->es_insert_pending_modifytables)
1696 : {
1697 19 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1698 19 : ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1699 :
1700 : Assert(mtstate);
1701 19 : ExecBatchInsert(mtstate, resultRelInfo,
1702 : resultRelInfo->ri_Slots,
1703 : resultRelInfo->ri_PlanSlots,
1704 : resultRelInfo->ri_NumSlots,
1705 19 : estate, mtstate->canSetTag);
1706 : }
1707 :
1708 17 : list_free(estate->es_insert_pending_result_relations);
1709 17 : list_free(estate->es_insert_pending_modifytables);
1710 17 : estate->es_insert_pending_result_relations = NIL;
1711 17 : estate->es_insert_pending_modifytables = NIL;
1712 17 : }
1713 :
1714 : /*
1715 : * ExecDeletePrologue -- subroutine for ExecDelete
1716 : *
1717 : * Prepare executor state for DELETE. Actually, the only thing we have to do
1718 : * here is execute BEFORE ROW triggers. We return false if one of them makes
1719 : * the delete a no-op; otherwise, return true.
1720 : */
1721 : static bool
1722 1022122 : ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1723 : ItemPointer tupleid, HeapTuple oldtuple,
1724 : TupleTableSlot **epqreturnslot, TM_Result *result)
1725 : {
1726 1022122 : if (result)
1727 1066 : *result = TM_Ok;
1728 :
1729 : /* BEFORE ROW DELETE triggers */
1730 1022122 : if (resultRelInfo->ri_TrigDesc &&
1731 4711 : resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1732 : {
1733 : /* Flush any pending inserts, so rows are visible to the triggers */
1734 217 : if (context->estate->es_insert_pending_result_relations != NIL)
1735 1 : ExecPendingInserts(context->estate);
1736 :
1737 207 : return ExecBRDeleteTriggers(context->estate, context->epqstate,
1738 : resultRelInfo, tupleid, oldtuple,
1739 : epqreturnslot, result, &context->tmfd,
1740 217 : context->mtstate->operation == CMD_MERGE);
1741 : }
1742 :
1743 1021905 : return true;
1744 : }
1745 :
1746 : /*
1747 : * ExecDeleteAct -- subroutine for ExecDelete
1748 : *
1749 : * Actually delete the tuple from a plain table.
1750 : *
1751 : * Caller is in charge of doing EvalPlanQual as necessary
1752 : */
1753 : static TM_Result
1754 1022016 : ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1755 : ItemPointer tupleid, bool changingPart)
1756 : {
1757 1022016 : EState *estate = context->estate;
1758 1022016 : uint32 options = 0;
1759 :
1760 1022016 : if (changingPart)
1761 696 : options |= TABLE_DELETE_CHANGING_PARTITION;
1762 :
1763 1022016 : return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1764 : estate->es_output_cid,
1765 : options,
1766 : estate->es_snapshot,
1767 : estate->es_crosscheck_snapshot,
1768 : true /* wait for commit */ ,
1769 : &context->tmfd);
1770 : }
1771 :
1772 : /*
1773 : * ExecDeleteEpilogue -- subroutine for ExecDelete
1774 : *
1775 : * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1776 : * including the UPDATE triggers if the deletion is being done as part of a
1777 : * cross-partition tuple move. It also inserts temporal leftovers from a
1778 : * DELETE FOR PORTION OF.
1779 : */
1780 : static void
1781 1021963 : ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1782 : ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1783 : {
1784 1021963 : ModifyTableState *mtstate = context->mtstate;
1785 1021963 : EState *estate = context->estate;
1786 : TransitionCaptureState *ar_delete_trig_tcs;
1787 :
1788 : /*
1789 : * If this delete is the result of a partition key update that moved the
1790 : * tuple to a new partition, put this row into the transition OLD TABLE,
1791 : * if there is one. We need to do this separately for DELETE and INSERT
1792 : * because they happen on different tables.
1793 : */
1794 1021963 : ar_delete_trig_tcs = mtstate->mt_transition_capture;
1795 1021963 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1796 36 : mtstate->mt_transition_capture->tcs_update_old_table)
1797 : {
1798 32 : ExecARUpdateTriggers(estate, resultRelInfo,
1799 : NULL, NULL,
1800 : tupleid, oldtuple,
1801 32 : NULL, NULL, mtstate->mt_transition_capture,
1802 : false);
1803 :
1804 : /*
1805 : * We've already captured the OLD TABLE row, so make sure any AR
1806 : * DELETE trigger fired below doesn't capture it again.
1807 : */
1808 32 : ar_delete_trig_tcs = NULL;
1809 : }
1810 :
1811 : /* Compute temporal leftovers in FOR PORTION OF */
1812 1021963 : if (((ModifyTable *) context->mtstate->ps.plan)->forPortionOf)
1813 353 : ExecForPortionOfLeftovers(context, estate, resultRelInfo, tupleid);
1814 :
1815 : /* AFTER ROW DELETE Triggers */
1816 1021947 : ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1817 : ar_delete_trig_tcs, changingPart);
1818 1021945 : }
1819 :
1820 : /* ----------------------------------------------------------------
1821 : * ExecDelete
1822 : *
1823 : * DELETE is like UPDATE, except that we delete the tuple and no
1824 : * index modifications are needed.
1825 : *
1826 : * When deleting from a table, tupleid identifies the tuple to delete and
1827 : * oldtuple is NULL. When deleting through a view INSTEAD OF trigger,
1828 : * oldtuple is passed to the triggers and identifies what to delete, and
1829 : * tupleid is invalid. When deleting from a foreign table, tupleid is
1830 : * invalid; the FDW has to figure out which row to delete using data from
1831 : * the planSlot. oldtuple is passed to foreign table triggers; it is
1832 : * NULL when the foreign table has no relevant triggers. We use
1833 : * tupleDeleted to indicate whether the tuple is actually deleted,
1834 : * callers can use it to decide whether to continue the operation. When
1835 : * this DELETE is a part of an UPDATE of partition-key, then the slot
1836 : * returned by EvalPlanQual() is passed back using output parameter
1837 : * epqreturnslot.
1838 : *
1839 : * Returns RETURNING result if any, otherwise NULL.
1840 : * ----------------------------------------------------------------
1841 : */
1842 : static TupleTableSlot *
1843 1021774 : ExecDelete(ModifyTableContext *context,
1844 : ResultRelInfo *resultRelInfo,
1845 : ItemPointer tupleid,
1846 : HeapTuple oldtuple,
1847 : bool processReturning,
1848 : bool changingPart,
1849 : bool canSetTag,
1850 : TM_Result *tmresult,
1851 : bool *tupleDeleted,
1852 : TupleTableSlot **epqreturnslot)
1853 : {
1854 1021774 : EState *estate = context->estate;
1855 1021774 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1856 1021774 : TupleTableSlot *slot = NULL;
1857 : TM_Result result;
1858 : bool saveOld;
1859 :
1860 1021774 : if (tupleDeleted)
1861 718 : *tupleDeleted = false;
1862 :
1863 : /*
1864 : * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1865 : * done if it says we are.
1866 : */
1867 1021774 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1868 : epqreturnslot, tmresult))
1869 33 : return NULL;
1870 :
1871 : /* INSTEAD OF ROW DELETE Triggers */
1872 1021731 : if (resultRelInfo->ri_TrigDesc &&
1873 4624 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
1874 31 : {
1875 : bool dodelete;
1876 :
1877 : Assert(oldtuple != NULL);
1878 35 : dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1879 :
1880 35 : if (!dodelete) /* "do nothing" */
1881 4 : return NULL;
1882 : }
1883 1021696 : else if (resultRelInfo->ri_FdwRoutine)
1884 : {
1885 : /*
1886 : * delete from foreign table: let the FDW do it
1887 : *
1888 : * We offer the returning slot as a place to store RETURNING data,
1889 : * although the FDW can return some other slot if it wants.
1890 : */
1891 23 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1892 23 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1893 : resultRelInfo,
1894 : slot,
1895 : context->planSlot);
1896 :
1897 23 : if (slot == NULL) /* "do nothing" */
1898 0 : return NULL;
1899 :
1900 : /*
1901 : * RETURNING expressions might reference the tableoid column, so
1902 : * (re)initialize tts_tableOid before evaluating them.
1903 : */
1904 23 : if (TTS_EMPTY(slot))
1905 5 : ExecStoreAllNullTuple(slot);
1906 :
1907 23 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1908 : }
1909 : else
1910 : {
1911 : /*
1912 : * delete the tuple
1913 : *
1914 : * Note: if context->estate->es_crosscheck_snapshot isn't
1915 : * InvalidSnapshot, we check that the row to be deleted is visible to
1916 : * that snapshot, and throw a can't-serialize error if not. This is a
1917 : * special-case behavior needed for referential integrity updates in
1918 : * transaction-snapshot mode transactions.
1919 : */
1920 1021673 : ldelete:
1921 1021679 : result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1922 :
1923 1021661 : if (tmresult)
1924 696 : *tmresult = result;
1925 :
1926 1021661 : switch (result)
1927 : {
1928 20 : case TM_SelfModified:
1929 :
1930 : /*
1931 : * The target tuple was already updated or deleted by the
1932 : * current command, or by a later command in the current
1933 : * transaction. The former case is possible in a join DELETE
1934 : * where multiple tuples join to the same target tuple. This
1935 : * is somewhat questionable, but Postgres has always allowed
1936 : * it: we just ignore additional deletion attempts.
1937 : *
1938 : * The latter case arises if the tuple is modified by a
1939 : * command in a BEFORE trigger, or perhaps by a command in a
1940 : * volatile function used in the query. In such situations we
1941 : * should not ignore the deletion, but it is equally unsafe to
1942 : * proceed. We don't want to discard the original DELETE
1943 : * while keeping the triggered actions based on its deletion;
1944 : * and it would be no better to allow the original DELETE
1945 : * while discarding updates that it triggered. The row update
1946 : * carries some information that might be important according
1947 : * to business rules; so throwing an error is the only safe
1948 : * course.
1949 : *
1950 : * If a trigger actually intends this type of interaction, it
1951 : * can re-execute the DELETE and then return NULL to cancel
1952 : * the outer delete.
1953 : */
1954 20 : if (context->tmfd.cmax != estate->es_output_cid)
1955 4 : ereport(ERROR,
1956 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1957 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1958 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1959 :
1960 : /* Else, already deleted by self; nothing to do */
1961 16 : return NULL;
1962 :
1963 1021579 : case TM_Ok:
1964 1021579 : break;
1965 :
1966 47 : case TM_Updated:
1967 : {
1968 : TupleTableSlot *inputslot;
1969 : TupleTableSlot *epqslot;
1970 :
1971 47 : if (IsolationUsesXactSnapshot())
1972 10 : ereport(ERROR,
1973 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1974 : errmsg("could not serialize access due to concurrent update")));
1975 :
1976 : /*
1977 : * Already know that we're going to need to do EPQ, so
1978 : * fetch tuple directly into the right slot.
1979 : */
1980 37 : EvalPlanQualBegin(context->epqstate);
1981 37 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1982 : resultRelInfo->ri_RangeTableIndex);
1983 :
1984 37 : result = table_tuple_lock(resultRelationDesc, tupleid,
1985 : estate->es_snapshot,
1986 : inputslot, estate->es_output_cid,
1987 : LockTupleExclusive, LockWaitBlock,
1988 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1989 : &context->tmfd);
1990 :
1991 33 : switch (result)
1992 : {
1993 30 : case TM_Ok:
1994 : Assert(context->tmfd.traversed);
1995 30 : epqslot = EvalPlanQual(context->epqstate,
1996 : resultRelationDesc,
1997 : resultRelInfo->ri_RangeTableIndex,
1998 : inputslot);
1999 30 : if (TupIsNull(epqslot))
2000 : /* Tuple not passing quals anymore, exiting... */
2001 16 : return NULL;
2002 :
2003 : /*
2004 : * If requested, skip delete and pass back the
2005 : * updated row.
2006 : */
2007 14 : if (epqreturnslot)
2008 : {
2009 8 : *epqreturnslot = epqslot;
2010 8 : return NULL;
2011 : }
2012 : else
2013 6 : goto ldelete;
2014 :
2015 2 : case TM_SelfModified:
2016 :
2017 : /*
2018 : * This can be reached when following an update
2019 : * chain from a tuple updated by another session,
2020 : * reaching a tuple that was already updated in
2021 : * this transaction. If previously updated by this
2022 : * command, ignore the delete, otherwise error
2023 : * out.
2024 : *
2025 : * See also TM_SelfModified response to
2026 : * table_tuple_delete() above.
2027 : */
2028 2 : if (context->tmfd.cmax != estate->es_output_cid)
2029 1 : ereport(ERROR,
2030 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2031 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
2032 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2033 1 : return NULL;
2034 :
2035 1 : case TM_Deleted:
2036 : /* tuple already deleted; nothing to do */
2037 1 : return NULL;
2038 :
2039 0 : default:
2040 :
2041 : /*
2042 : * TM_Invisible should be impossible because we're
2043 : * waiting for updated row versions, and would
2044 : * already have errored out if the first version
2045 : * is invisible.
2046 : *
2047 : * TM_Updated should be impossible, because we're
2048 : * locking the latest version via
2049 : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
2050 : */
2051 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2052 : result);
2053 : return NULL;
2054 : }
2055 :
2056 : Assert(false);
2057 : break;
2058 : }
2059 :
2060 15 : case TM_Deleted:
2061 15 : if (IsolationUsesXactSnapshot())
2062 9 : ereport(ERROR,
2063 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2064 : errmsg("could not serialize access due to concurrent delete")));
2065 : /* tuple already deleted; nothing to do */
2066 6 : return NULL;
2067 :
2068 0 : default:
2069 0 : elog(ERROR, "unrecognized table_tuple_delete status: %u",
2070 : result);
2071 : return NULL;
2072 : }
2073 :
2074 : /*
2075 : * Note: Normally one would think that we have to delete index tuples
2076 : * associated with the heap tuple now...
2077 : *
2078 : * ... but in POSTGRES, we have no need to do this because VACUUM will
2079 : * take care of it later. We can't delete index tuples immediately
2080 : * anyway, since the tuple is still visible to other transactions.
2081 : */
2082 : }
2083 :
2084 1021633 : if (canSetTag)
2085 1020816 : (estate->es_processed)++;
2086 :
2087 : /* Tell caller that the delete actually happened. */
2088 1021633 : if (tupleDeleted)
2089 666 : *tupleDeleted = true;
2090 :
2091 1021633 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
2092 :
2093 : /*
2094 : * Process RETURNING if present and if requested.
2095 : *
2096 : * If this is part of a cross-partition UPDATE, and the RETURNING list
2097 : * refers to any OLD column values, save the old tuple here for later
2098 : * processing of the RETURNING list by ExecInsert().
2099 : */
2100 1021710 : saveOld = changingPart && resultRelInfo->ri_projectReturning &&
2101 95 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD;
2102 :
2103 1021615 : if (resultRelInfo->ri_projectReturning && (processReturning || saveOld))
2104 : {
2105 : /*
2106 : * We have to put the target tuple into a slot, which means first we
2107 : * gotta fetch it. We can use the trigger tuple slot.
2108 : */
2109 : TupleTableSlot *rslot;
2110 :
2111 624 : if (resultRelInfo->ri_FdwRoutine)
2112 : {
2113 : /* FDW must have provided a slot containing the deleted row */
2114 : Assert(!TupIsNull(slot));
2115 : }
2116 : else
2117 : {
2118 617 : slot = ExecGetReturningSlot(estate, resultRelInfo);
2119 617 : if (oldtuple != NULL)
2120 : {
2121 16 : ExecForceStoreHeapTuple(oldtuple, slot, false);
2122 : }
2123 : else
2124 : {
2125 601 : if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
2126 : SnapshotAny, slot))
2127 0 : elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
2128 : }
2129 : }
2130 :
2131 : /*
2132 : * If required, save the old tuple for later processing of the
2133 : * RETURNING list by ExecInsert().
2134 : */
2135 624 : if (saveOld)
2136 : {
2137 : TupleConversionMap *tupconv_map;
2138 :
2139 : /*
2140 : * Convert the tuple into the root partition's format/slot, if
2141 : * needed. ExecInsert() will then convert it to the new
2142 : * partition's format/slot, if necessary.
2143 : */
2144 30 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
2145 30 : if (tupconv_map != NULL)
2146 : {
2147 12 : ResultRelInfo *rootRelInfo = context->mtstate->rootResultRelInfo;
2148 12 : TupleTableSlot *oldSlot = slot;
2149 :
2150 12 : slot = execute_attr_map_slot(tupconv_map->attrMap,
2151 : slot,
2152 : ExecGetReturningSlot(estate,
2153 : rootRelInfo));
2154 :
2155 12 : slot->tts_tableOid = oldSlot->tts_tableOid;
2156 12 : ItemPointerCopy(&oldSlot->tts_tid, &slot->tts_tid);
2157 : }
2158 :
2159 30 : context->cpDeletedSlot = slot;
2160 :
2161 30 : return NULL;
2162 : }
2163 :
2164 594 : rslot = ExecProcessReturning(context, resultRelInfo, true,
2165 : slot, NULL, context->planSlot);
2166 :
2167 : /*
2168 : * Before releasing the target tuple again, make sure rslot has a
2169 : * local copy of any pass-by-reference values.
2170 : */
2171 594 : ExecMaterializeSlot(rslot);
2172 :
2173 594 : ExecClearTuple(slot);
2174 :
2175 594 : return rslot;
2176 : }
2177 :
2178 1020991 : return NULL;
2179 : }
2180 :
2181 : /*
2182 : * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
2183 : *
2184 : * This works by first deleting the old tuple from the current partition,
2185 : * followed by inserting the new tuple into the root parent table, that is,
2186 : * mtstate->rootResultRelInfo. It will be re-routed from there to the
2187 : * correct partition.
2188 : *
2189 : * Returns true if the tuple has been successfully moved, or if it's found
2190 : * that the tuple was concurrently deleted so there's nothing more to do
2191 : * for the caller.
2192 : *
2193 : * False is returned if the tuple we're trying to move is found to have been
2194 : * concurrently updated. In that case, the caller must check if the updated
2195 : * tuple that's returned in *retry_slot still needs to be re-routed, and call
2196 : * this function again or perform a regular update accordingly. For MERGE,
2197 : * the updated tuple is not returned in *retry_slot; it has its own retry
2198 : * logic.
2199 : */
2200 : static bool
2201 750 : ExecCrossPartitionUpdate(ModifyTableContext *context,
2202 : ResultRelInfo *resultRelInfo,
2203 : ItemPointer tupleid, HeapTuple oldtuple,
2204 : TupleTableSlot *slot,
2205 : bool canSetTag,
2206 : UpdateContext *updateCxt,
2207 : TM_Result *tmresult,
2208 : TupleTableSlot **retry_slot,
2209 : TupleTableSlot **inserted_tuple,
2210 : ResultRelInfo **insert_destrel)
2211 : {
2212 750 : ModifyTableState *mtstate = context->mtstate;
2213 750 : EState *estate = mtstate->ps.state;
2214 : TupleConversionMap *tupconv_map;
2215 : bool tuple_deleted;
2216 750 : TupleTableSlot *epqslot = NULL;
2217 :
2218 750 : context->cpDeletedSlot = NULL;
2219 750 : context->cpUpdateReturningSlot = NULL;
2220 750 : *retry_slot = NULL;
2221 :
2222 : /*
2223 : * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
2224 : * to migrate to a different partition. Maybe this can be implemented
2225 : * some day, but it seems a fringe feature with little redeeming value.
2226 : */
2227 750 : if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
2228 0 : ereport(ERROR,
2229 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2230 : errmsg("invalid ON UPDATE specification"),
2231 : errdetail("The result tuple would appear in a different partition than the original tuple.")));
2232 :
2233 : /*
2234 : * When an UPDATE is run directly on a leaf partition, simply fail with a
2235 : * partition constraint violation error.
2236 : */
2237 750 : if (resultRelInfo == mtstate->rootResultRelInfo)
2238 32 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
2239 :
2240 : /*
2241 : * Initialize tuple routing info if not already done. Note whatever we do
2242 : * here must be done in ExecInitModifyTable for FOR PORTION OF as well.
2243 : */
2244 718 : if (mtstate->mt_partition_tuple_routing == NULL)
2245 : {
2246 439 : Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
2247 : MemoryContext oldcxt;
2248 :
2249 : /* Things built here have to last for the query duration. */
2250 439 : oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
2251 :
2252 439 : mtstate->mt_partition_tuple_routing =
2253 439 : ExecSetupPartitionTupleRouting(estate, rootRel);
2254 :
2255 : /*
2256 : * Before a partition's tuple can be re-routed, it must first be
2257 : * converted to the root's format, so we'll need a slot for storing
2258 : * such tuples.
2259 : */
2260 : Assert(mtstate->mt_root_tuple_slot == NULL);
2261 439 : mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
2262 :
2263 439 : MemoryContextSwitchTo(oldcxt);
2264 : }
2265 :
2266 : /*
2267 : * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
2268 : * We want to return rows from INSERT.
2269 : */
2270 718 : ExecDelete(context, resultRelInfo,
2271 : tupleid, oldtuple,
2272 : false, /* processReturning */
2273 : true, /* changingPart */
2274 : false, /* canSetTag */
2275 : tmresult, &tuple_deleted, &epqslot);
2276 :
2277 : /*
2278 : * For some reason if DELETE didn't happen (e.g. trigger prevented it, or
2279 : * it was already deleted by self, or it was concurrently deleted by
2280 : * another transaction), then we should skip the insert as well;
2281 : * otherwise, an UPDATE could cause an increase in the total number of
2282 : * rows across all partitions, which is clearly wrong.
2283 : *
2284 : * For a normal UPDATE, the case where the tuple has been the subject of a
2285 : * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
2286 : * machinery, but for an UPDATE that we've translated into a DELETE from
2287 : * this partition and an INSERT into some other partition, that's not
2288 : * available, because CTID chains can't span relation boundaries. We
2289 : * mimic the semantics to a limited extent by skipping the INSERT if the
2290 : * DELETE fails to find a tuple. This ensures that two concurrent
2291 : * attempts to UPDATE the same tuple at the same time can't turn one tuple
2292 : * into two, and that an UPDATE of a just-deleted tuple can't resurrect
2293 : * it.
2294 : */
2295 715 : if (!tuple_deleted)
2296 : {
2297 : /*
2298 : * epqslot will be typically NULL. But when ExecDelete() finds that
2299 : * another transaction has concurrently updated the same row, it
2300 : * re-fetches the row, skips the delete, and epqslot is set to the
2301 : * re-fetched tuple slot. In that case, we need to do all the checks
2302 : * again. For MERGE, we leave everything to the caller (it must do
2303 : * additional rechecking, and might end up executing a different
2304 : * action entirely).
2305 : */
2306 49 : if (mtstate->operation == CMD_MERGE)
2307 23 : return *tmresult == TM_Ok;
2308 26 : else if (TupIsNull(epqslot))
2309 23 : return true;
2310 : else
2311 : {
2312 : /* Fetch the most recent version of old tuple. */
2313 : TupleTableSlot *oldSlot;
2314 :
2315 : /* ... but first, make sure ri_oldTupleSlot is initialized. */
2316 3 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2317 0 : ExecInitUpdateProjection(mtstate, resultRelInfo);
2318 3 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2319 3 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
2320 : tupleid,
2321 : SnapshotAny,
2322 : oldSlot))
2323 0 : elog(ERROR, "failed to fetch tuple being updated");
2324 : /* and project the new tuple to retry the UPDATE with */
2325 3 : *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
2326 : oldSlot);
2327 3 : return false;
2328 : }
2329 : }
2330 :
2331 : /*
2332 : * resultRelInfo is one of the per-relation resultRelInfos. So we should
2333 : * convert the tuple into root's tuple descriptor if needed, since
2334 : * ExecInsert() starts the search from root.
2335 : */
2336 666 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
2337 666 : if (tupconv_map != NULL)
2338 217 : slot = execute_attr_map_slot(tupconv_map->attrMap,
2339 : slot,
2340 : mtstate->mt_root_tuple_slot);
2341 :
2342 : /* Tuple routing starts from the root table. */
2343 583 : context->cpUpdateReturningSlot =
2344 666 : ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
2345 : inserted_tuple, insert_destrel);
2346 :
2347 : /*
2348 : * Reset the transition state that may possibly have been written by
2349 : * INSERT.
2350 : */
2351 583 : if (mtstate->mt_transition_capture)
2352 36 : mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
2353 :
2354 : /* We're done moving. */
2355 583 : return true;
2356 : }
2357 :
2358 : /*
2359 : * ExecUpdatePrologue -- subroutine for ExecUpdate
2360 : *
2361 : * Prepare executor state for UPDATE. This includes running BEFORE ROW
2362 : * triggers. We return false if one of them makes the update a no-op;
2363 : * otherwise, return true.
2364 : */
2365 : static bool
2366 2215503 : ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2367 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2368 : TM_Result *result)
2369 : {
2370 2215503 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2371 :
2372 2215503 : if (result)
2373 1413 : *result = TM_Ok;
2374 :
2375 2215503 : ExecMaterializeSlot(slot);
2376 :
2377 : /*
2378 : * Open the table's indexes, if we have not done so already, so that we
2379 : * can add new index entries for the updated tuple.
2380 : */
2381 2215503 : if (resultRelationDesc->rd_rel->relhasindex &&
2382 146450 : resultRelInfo->ri_IndexRelationDescs == NULL)
2383 5763 : ExecOpenIndices(resultRelInfo, false);
2384 :
2385 : /* BEFORE ROW UPDATE triggers */
2386 2215503 : if (resultRelInfo->ri_TrigDesc &&
2387 3995 : resultRelInfo->ri_TrigDesc->trig_update_before_row)
2388 : {
2389 : /* Flush any pending inserts, so rows are visible to the triggers */
2390 1569 : if (context->estate->es_insert_pending_result_relations != NIL)
2391 1 : ExecPendingInserts(context->estate);
2392 :
2393 1557 : return ExecBRUpdateTriggers(context->estate, context->epqstate,
2394 : resultRelInfo, tupleid, oldtuple, slot,
2395 : result, &context->tmfd,
2396 1569 : context->mtstate->operation == CMD_MERGE);
2397 : }
2398 :
2399 2213934 : return true;
2400 : }
2401 :
2402 : /*
2403 : * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
2404 : *
2405 : * Apply the final modifications to the tuple slot before the update.
2406 : * (This is split out because we also need it in the foreign-table code path.)
2407 : */
2408 : static void
2409 2215317 : ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
2410 : TupleTableSlot *slot,
2411 : EState *estate)
2412 : {
2413 2215317 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2414 :
2415 : /*
2416 : * Constraints and GENERATED expressions might reference the tableoid
2417 : * column, so (re-)initialize tts_tableOid before evaluating them.
2418 : */
2419 2215317 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2420 :
2421 : /*
2422 : * Compute stored generated columns
2423 : */
2424 2215317 : if (resultRelationDesc->rd_att->constr &&
2425 123467 : resultRelationDesc->rd_att->constr->has_generated_stored)
2426 158 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
2427 : CMD_UPDATE);
2428 2215317 : }
2429 :
2430 : /*
2431 : * ExecUpdateAct -- subroutine for ExecUpdate
2432 : *
2433 : * Actually update the tuple, when operating on a plain table. If the
2434 : * table is a partition, and the command was called referencing an ancestor
2435 : * partitioned table, this routine migrates the resulting tuple to another
2436 : * partition.
2437 : *
2438 : * The caller is in charge of keeping indexes current as necessary. The
2439 : * caller is also in charge of doing EvalPlanQual if the tuple is found to
2440 : * be concurrently updated. However, in case of a cross-partition update,
2441 : * this routine does it.
2442 : */
2443 : static TM_Result
2444 2215219 : ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2445 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2446 : bool canSetTag, UpdateContext *updateCxt)
2447 : {
2448 2215219 : EState *estate = context->estate;
2449 2215219 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2450 : bool partition_constraint_failed;
2451 : TM_Result result;
2452 :
2453 2215219 : updateCxt->crossPartUpdate = false;
2454 :
2455 : /*
2456 : * If we move the tuple to a new partition, we loop back here to recompute
2457 : * GENERATED values (which are allowed to be different across partitions)
2458 : * and recheck any RLS policies and constraints. We do not fire any
2459 : * BEFORE triggers of the new partition, however.
2460 : */
2461 2215222 : lreplace:
2462 : /* Fill in GENERATEd columns */
2463 2215222 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2464 :
2465 : /* ensure slot is independent, consider e.g. EPQ */
2466 2215222 : ExecMaterializeSlot(slot);
2467 :
2468 : /*
2469 : * If partition constraint fails, this row might get moved to another
2470 : * partition, in which case we should check the RLS CHECK policy just
2471 : * before inserting into the new partition, rather than doing it here.
2472 : * This is because a trigger on that partition might again change the row.
2473 : * So skip the WCO checks if the partition constraint fails.
2474 : */
2475 2215222 : partition_constraint_failed =
2476 2217051 : resultRelationDesc->rd_rel->relispartition &&
2477 1829 : !ExecPartitionCheck(resultRelInfo, slot, estate, false);
2478 :
2479 : /* Check any RLS UPDATE WITH CHECK policies */
2480 2215222 : if (!partition_constraint_failed &&
2481 2214472 : resultRelInfo->ri_WithCheckOptions != NIL)
2482 : {
2483 : /*
2484 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
2485 : * we are looking for at this point.
2486 : */
2487 356 : ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
2488 : resultRelInfo, slot, estate);
2489 : }
2490 :
2491 : /*
2492 : * If a partition check failed, try to move the row into the right
2493 : * partition.
2494 : */
2495 2215186 : if (partition_constraint_failed)
2496 : {
2497 : TupleTableSlot *inserted_tuple,
2498 : *retry_slot;
2499 750 : ResultRelInfo *insert_destrel = NULL;
2500 :
2501 : /*
2502 : * ExecCrossPartitionUpdate will first DELETE the row from the
2503 : * partition it's currently in and then insert it back into the root
2504 : * table, which will re-route it to the correct partition. However,
2505 : * if the tuple has been concurrently updated, a retry is needed.
2506 : */
2507 750 : if (ExecCrossPartitionUpdate(context, resultRelInfo,
2508 : tupleid, oldtuple, slot,
2509 : canSetTag, updateCxt,
2510 : &result,
2511 : &retry_slot,
2512 : &inserted_tuple,
2513 : &insert_destrel))
2514 : {
2515 : /* success! */
2516 622 : updateCxt->crossPartUpdate = true;
2517 :
2518 : /*
2519 : * If the partitioned table being updated is referenced in foreign
2520 : * keys, queue up trigger events to check that none of them were
2521 : * violated. No special treatment is needed in
2522 : * non-cross-partition update situations, because the leaf
2523 : * partition's AR update triggers will take care of that. During
2524 : * cross-partition updates implemented as delete on the source
2525 : * partition followed by insert on the destination partition,
2526 : * AR-UPDATE triggers of the root table (that is, the table
2527 : * mentioned in the query) must be fired.
2528 : *
2529 : * NULL insert_destrel means that the move failed to occur, that
2530 : * is, the update failed, so no need to anything in that case.
2531 : */
2532 622 : if (insert_destrel &&
2533 565 : resultRelInfo->ri_TrigDesc &&
2534 242 : resultRelInfo->ri_TrigDesc->trig_update_after_row)
2535 202 : ExecCrossPartitionUpdateForeignKey(context,
2536 : resultRelInfo,
2537 : insert_destrel,
2538 : tupleid, slot,
2539 : inserted_tuple);
2540 :
2541 625 : return TM_Ok;
2542 : }
2543 :
2544 : /*
2545 : * No luck, a retry is needed. If running MERGE, we do not do so
2546 : * here; instead let it handle that on its own rules.
2547 : */
2548 10 : if (context->mtstate->operation == CMD_MERGE)
2549 7 : return result;
2550 :
2551 : /*
2552 : * ExecCrossPartitionUpdate installed an updated version of the new
2553 : * tuple in the retry slot; start over.
2554 : */
2555 3 : slot = retry_slot;
2556 3 : goto lreplace;
2557 : }
2558 :
2559 : /*
2560 : * Check the constraints of the tuple. We've already checked the
2561 : * partition constraint above; however, we must still ensure the tuple
2562 : * passes all other constraints, so we will call ExecConstraints() and
2563 : * have it validate all remaining checks.
2564 : */
2565 2214436 : if (resultRelationDesc->rd_att->constr)
2566 123066 : ExecConstraints(resultRelInfo, slot, estate);
2567 :
2568 : /*
2569 : * replace the heap tuple
2570 : *
2571 : * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
2572 : * the row to be updated is visible to that snapshot, and throw a
2573 : * can't-serialize error if not. This is a special-case behavior needed
2574 : * for referential integrity updates in transaction-snapshot mode
2575 : * transactions.
2576 : */
2577 2214380 : result = table_tuple_update(resultRelationDesc, tupleid, slot,
2578 : estate->es_output_cid,
2579 : 0,
2580 : estate->es_snapshot,
2581 : estate->es_crosscheck_snapshot,
2582 : true /* wait for commit */ ,
2583 : &context->tmfd, &updateCxt->lockmode,
2584 : &updateCxt->updateIndexes);
2585 :
2586 2214368 : return result;
2587 : }
2588 :
2589 : /*
2590 : * ExecUpdateEpilogue -- subroutine for ExecUpdate
2591 : *
2592 : * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2593 : * returns indicating that the tuple was updated. It also inserts temporal
2594 : * leftovers from an UPDATE FOR PORTION OF.
2595 : */
2596 : static void
2597 2214369 : ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
2598 : ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2599 : HeapTuple oldtuple, TupleTableSlot *slot)
2600 : {
2601 2214369 : ModifyTableState *mtstate = context->mtstate;
2602 2214369 : List *recheckIndexes = NIL;
2603 :
2604 : /* insert index entries for tuple if necessary */
2605 2214369 : if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
2606 : {
2607 115977 : uint32 flags = EIIT_IS_UPDATE;
2608 :
2609 115977 : if (updateCxt->updateIndexes == TU_Summarizing)
2610 2188 : flags |= EIIT_ONLY_SUMMARIZING;
2611 115977 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo, context->estate,
2612 : flags, slot, NIL,
2613 : NULL);
2614 : }
2615 :
2616 : /* Compute temporal leftovers in FOR PORTION OF */
2617 2214308 : if (((ModifyTable *) context->mtstate->ps.plan)->forPortionOf)
2618 430 : ExecForPortionOfLeftovers(context, context->estate, resultRelInfo, tupleid);
2619 :
2620 : /* AFTER ROW UPDATE Triggers */
2621 2214292 : ExecARUpdateTriggers(context->estate, resultRelInfo,
2622 : NULL, NULL,
2623 : tupleid, oldtuple, slot,
2624 : recheckIndexes,
2625 2214292 : mtstate->operation == CMD_INSERT ?
2626 : mtstate->mt_oc_transition_capture :
2627 : mtstate->mt_transition_capture,
2628 : false);
2629 :
2630 2214290 : list_free(recheckIndexes);
2631 :
2632 : /*
2633 : * Check any WITH CHECK OPTION constraints from parent views. We are
2634 : * required to do this after testing all constraints and uniqueness
2635 : * violations per the SQL spec, so we do it after actually updating the
2636 : * record in the heap and all indexes.
2637 : *
2638 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2639 : * are looking for at this point.
2640 : */
2641 2214290 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2642 337 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2643 : slot, context->estate);
2644 2214236 : }
2645 :
2646 : /*
2647 : * Queues up an update event using the target root partitioned table's
2648 : * trigger to check that a cross-partition update hasn't broken any foreign
2649 : * keys pointing into it.
2650 : */
2651 : static void
2652 202 : ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
2653 : ResultRelInfo *sourcePartInfo,
2654 : ResultRelInfo *destPartInfo,
2655 : ItemPointer tupleid,
2656 : TupleTableSlot *oldslot,
2657 : TupleTableSlot *newslot)
2658 : {
2659 : ListCell *lc;
2660 : ResultRelInfo *rootRelInfo;
2661 : List *ancestorRels;
2662 :
2663 202 : rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2664 202 : ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2665 :
2666 : /*
2667 : * For any foreign keys that point directly into a non-root ancestors of
2668 : * the source partition, we can in theory fire an update event to enforce
2669 : * those constraints using their triggers, if we could tell that both the
2670 : * source and the destination partitions are under the same ancestor. But
2671 : * for now, we simply report an error that those cannot be enforced.
2672 : */
2673 440 : foreach(lc, ancestorRels)
2674 : {
2675 242 : ResultRelInfo *rInfo = lfirst(lc);
2676 242 : TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2677 242 : bool has_noncloned_fkey = false;
2678 :
2679 : /* Root ancestor's triggers will be processed. */
2680 242 : if (rInfo == rootRelInfo)
2681 198 : continue;
2682 :
2683 44 : if (trigdesc && trigdesc->trig_update_after_row)
2684 : {
2685 152 : for (int i = 0; i < trigdesc->numtriggers; i++)
2686 : {
2687 112 : Trigger *trig = &trigdesc->triggers[i];
2688 :
2689 116 : if (!trig->tgisclone &&
2690 4 : RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
2691 : {
2692 4 : has_noncloned_fkey = true;
2693 4 : break;
2694 : }
2695 : }
2696 : }
2697 :
2698 44 : if (has_noncloned_fkey)
2699 4 : ereport(ERROR,
2700 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2701 : errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2702 : errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2703 : RelationGetRelationName(rInfo->ri_RelationDesc),
2704 : RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
2705 : errhint("Consider defining the foreign key on table \"%s\".",
2706 : RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
2707 : }
2708 :
2709 : /* Perform the root table's triggers. */
2710 198 : ExecARUpdateTriggers(context->estate,
2711 : rootRelInfo, sourcePartInfo, destPartInfo,
2712 : tupleid, NULL, newslot, NIL, NULL, true);
2713 198 : }
2714 :
2715 : /* ----------------------------------------------------------------
2716 : * ExecUpdate
2717 : *
2718 : * note: we can't run UPDATE queries with transactions
2719 : * off because UPDATEs are actually INSERTs and our
2720 : * scan will mistakenly loop forever, updating the tuple
2721 : * it just inserted.. This should be fixed but until it
2722 : * is, we don't want to get stuck in an infinite loop
2723 : * which corrupts your database..
2724 : *
2725 : * When updating a table, tupleid identifies the tuple to update and
2726 : * oldtuple is NULL. When updating through a view INSTEAD OF trigger,
2727 : * oldtuple is passed to the triggers and identifies what to update, and
2728 : * tupleid is invalid. When updating a foreign table, tupleid is
2729 : * invalid; the FDW has to figure out which row to update using data from
2730 : * the planSlot. oldtuple is passed to foreign table triggers; it is
2731 : * NULL when the foreign table has no relevant triggers.
2732 : *
2733 : * oldSlot contains the old tuple value.
2734 : * slot contains the new tuple value to be stored.
2735 : * planSlot is the output of the ModifyTable's subplan; we use it
2736 : * to access values from other input tables (for RETURNING),
2737 : * row-ID junk columns, etc.
2738 : *
2739 : * Returns RETURNING result if any, otherwise NULL. On exit, if tupleid
2740 : * had identified the tuple to update, it will identify the tuple
2741 : * actually updated after EvalPlanQual.
2742 : * ----------------------------------------------------------------
2743 : */
2744 : static TupleTableSlot *
2745 2214090 : ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2746 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *oldSlot,
2747 : TupleTableSlot *slot, bool canSetTag)
2748 : {
2749 2214090 : EState *estate = context->estate;
2750 2214090 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2751 2214090 : UpdateContext updateCxt = {0};
2752 : TM_Result result;
2753 :
2754 : /*
2755 : * abort the operation if not running transactions
2756 : */
2757 2214090 : if (IsBootstrapProcessingMode())
2758 0 : elog(ERROR, "cannot UPDATE during bootstrap");
2759 :
2760 : /*
2761 : * Prepare for the update. This includes BEFORE ROW triggers, so we're
2762 : * done if it says we are.
2763 : */
2764 2214090 : if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
2765 85 : return NULL;
2766 :
2767 : /* INSTEAD OF ROW UPDATE Triggers */
2768 2213993 : if (resultRelInfo->ri_TrigDesc &&
2769 3657 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2770 : {
2771 83 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2772 : oldtuple, slot))
2773 12 : return NULL; /* "do nothing" */
2774 : }
2775 2213910 : else if (resultRelInfo->ri_FdwRoutine)
2776 : {
2777 : /* Fill in GENERATEd columns */
2778 95 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2779 :
2780 : /*
2781 : * update in foreign table: let the FDW do it
2782 : */
2783 95 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2784 : resultRelInfo,
2785 : slot,
2786 : context->planSlot);
2787 :
2788 95 : if (slot == NULL) /* "do nothing" */
2789 1 : return NULL;
2790 :
2791 : /*
2792 : * AFTER ROW Triggers or RETURNING expressions might reference the
2793 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
2794 : * them. (This covers the case where the FDW replaced the slot.)
2795 : */
2796 94 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2797 : }
2798 : else
2799 : {
2800 : ItemPointerData lockedtid;
2801 :
2802 : /*
2803 : * If we generate a new candidate tuple after EvalPlanQual testing, we
2804 : * must loop back here to try again. (We don't need to redo triggers,
2805 : * however. If there are any BEFORE triggers then trigger.c will have
2806 : * done table_tuple_lock to lock the correct tuple, so there's no need
2807 : * to do them again.)
2808 : */
2809 2213815 : redo_act:
2810 2213869 : lockedtid = *tupleid;
2811 2213869 : result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2812 : canSetTag, &updateCxt);
2813 :
2814 : /*
2815 : * If ExecUpdateAct reports that a cross-partition update was done,
2816 : * then the RETURNING tuple (if any) has been projected and there's
2817 : * nothing else for us to do.
2818 : */
2819 2213658 : if (updateCxt.crossPartUpdate)
2820 616 : return context->cpUpdateReturningSlot;
2821 :
2822 2213129 : switch (result)
2823 : {
2824 60 : case TM_SelfModified:
2825 :
2826 : /*
2827 : * The target tuple was already updated or deleted by the
2828 : * current command, or by a later command in the current
2829 : * transaction. The former case is possible in a join UPDATE
2830 : * where multiple tuples join to the same target tuple. This
2831 : * is pretty questionable, but Postgres has always allowed it:
2832 : * we just execute the first update action and ignore
2833 : * additional update attempts.
2834 : *
2835 : * The latter case arises if the tuple is modified by a
2836 : * command in a BEFORE trigger, or perhaps by a command in a
2837 : * volatile function used in the query. In such situations we
2838 : * should not ignore the update, but it is equally unsafe to
2839 : * proceed. We don't want to discard the original UPDATE
2840 : * while keeping the triggered actions based on it; and we
2841 : * have no principled way to merge this update with the
2842 : * previous ones. So throwing an error is the only safe
2843 : * course.
2844 : *
2845 : * If a trigger actually intends this type of interaction, it
2846 : * can re-execute the UPDATE (assuming it can figure out how)
2847 : * and then return NULL to cancel the outer update.
2848 : */
2849 60 : if (context->tmfd.cmax != estate->es_output_cid)
2850 4 : ereport(ERROR,
2851 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2852 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2853 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2854 :
2855 : /* Else, already updated by self; nothing to do */
2856 56 : return NULL;
2857 :
2858 2212961 : case TM_Ok:
2859 2212961 : break;
2860 :
2861 92 : case TM_Updated:
2862 : {
2863 : TupleTableSlot *inputslot;
2864 : TupleTableSlot *epqslot;
2865 :
2866 92 : if (IsolationUsesXactSnapshot())
2867 11 : ereport(ERROR,
2868 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2869 : errmsg("could not serialize access due to concurrent update")));
2870 :
2871 : /*
2872 : * Already know that we're going to need to do EPQ, so
2873 : * fetch tuple directly into the right slot.
2874 : */
2875 81 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2876 : resultRelInfo->ri_RangeTableIndex);
2877 :
2878 81 : result = table_tuple_lock(resultRelationDesc, tupleid,
2879 : estate->es_snapshot,
2880 : inputslot, estate->es_output_cid,
2881 : updateCxt.lockmode, LockWaitBlock,
2882 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2883 : &context->tmfd);
2884 :
2885 79 : switch (result)
2886 : {
2887 74 : case TM_Ok:
2888 : Assert(context->tmfd.traversed);
2889 :
2890 74 : epqslot = EvalPlanQual(context->epqstate,
2891 : resultRelationDesc,
2892 : resultRelInfo->ri_RangeTableIndex,
2893 : inputslot);
2894 74 : if (TupIsNull(epqslot))
2895 : /* Tuple not passing quals anymore, exiting... */
2896 20 : return NULL;
2897 :
2898 : /* Make sure ri_oldTupleSlot is initialized. */
2899 54 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2900 0 : ExecInitUpdateProjection(context->mtstate,
2901 : resultRelInfo);
2902 :
2903 54 : if (resultRelInfo->ri_needLockTagTuple)
2904 : {
2905 1 : UnlockTuple(resultRelationDesc,
2906 : &lockedtid, InplaceUpdateTupleLock);
2907 1 : LockTuple(resultRelationDesc,
2908 : tupleid, InplaceUpdateTupleLock);
2909 : }
2910 :
2911 : /* Fetch the most recent version of old tuple. */
2912 54 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2913 54 : if (!table_tuple_fetch_row_version(resultRelationDesc,
2914 : tupleid,
2915 : SnapshotAny,
2916 : oldSlot))
2917 0 : elog(ERROR, "failed to fetch tuple being updated");
2918 54 : slot = ExecGetUpdateNewTuple(resultRelInfo,
2919 : epqslot, oldSlot);
2920 54 : goto redo_act;
2921 :
2922 1 : case TM_Deleted:
2923 : /* tuple already deleted; nothing to do */
2924 1 : return NULL;
2925 :
2926 4 : case TM_SelfModified:
2927 :
2928 : /*
2929 : * This can be reached when following an update
2930 : * chain from a tuple updated by another session,
2931 : * reaching a tuple that was already updated in
2932 : * this transaction. If previously modified by
2933 : * this command, ignore the redundant update,
2934 : * otherwise error out.
2935 : *
2936 : * See also TM_SelfModified response to
2937 : * table_tuple_update() above.
2938 : */
2939 4 : if (context->tmfd.cmax != estate->es_output_cid)
2940 1 : ereport(ERROR,
2941 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2942 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2943 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2944 3 : return NULL;
2945 :
2946 0 : default:
2947 : /* see table_tuple_lock call in ExecDelete() */
2948 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2949 : result);
2950 : return NULL;
2951 : }
2952 : }
2953 :
2954 : break;
2955 :
2956 16 : case TM_Deleted:
2957 16 : if (IsolationUsesXactSnapshot())
2958 9 : ereport(ERROR,
2959 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2960 : errmsg("could not serialize access due to concurrent delete")));
2961 : /* tuple already deleted; nothing to do */
2962 7 : return NULL;
2963 :
2964 0 : default:
2965 0 : elog(ERROR, "unrecognized table_tuple_update status: %u",
2966 : result);
2967 : return NULL;
2968 : }
2969 : }
2970 :
2971 2213118 : if (canSetTag)
2972 2212713 : (estate->es_processed)++;
2973 :
2974 2213118 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2975 : slot);
2976 :
2977 : /* Process RETURNING if present */
2978 2212993 : if (resultRelInfo->ri_projectReturning)
2979 1477 : return ExecProcessReturning(context, resultRelInfo, false,
2980 : oldSlot, slot, context->planSlot);
2981 :
2982 2211516 : return NULL;
2983 : }
2984 :
2985 : /*
2986 : * ExecOnConflictLockRow --- lock the row for ON CONFLICT DO SELECT/UPDATE
2987 : *
2988 : * Try to lock tuple for update as part of speculative insertion for ON
2989 : * CONFLICT DO UPDATE or ON CONFLICT DO SELECT FOR UPDATE/SHARE.
2990 : *
2991 : * Returns true if the row is successfully locked, or false if the caller must
2992 : * retry the INSERT from scratch.
2993 : */
2994 : static bool
2995 2810 : ExecOnConflictLockRow(ModifyTableContext *context,
2996 : TupleTableSlot *existing,
2997 : ItemPointer conflictTid,
2998 : Relation relation,
2999 : LockTupleMode lockmode,
3000 : bool isUpdate)
3001 : {
3002 : TM_FailureData tmfd;
3003 : TM_Result test;
3004 : Datum xminDatum;
3005 : TransactionId xmin;
3006 : bool isnull;
3007 :
3008 : /*
3009 : * Lock tuple with lockmode. Don't follow updates when tuple cannot be
3010 : * locked without doing so. A row locking conflict here means our
3011 : * previous conclusion that the tuple is conclusively committed is not
3012 : * true anymore.
3013 : */
3014 2810 : test = table_tuple_lock(relation, conflictTid,
3015 2810 : context->estate->es_snapshot,
3016 2810 : existing, context->estate->es_output_cid,
3017 : lockmode, LockWaitBlock, 0,
3018 : &tmfd);
3019 2810 : switch (test)
3020 : {
3021 2779 : case TM_Ok:
3022 : /* success! */
3023 2779 : break;
3024 :
3025 28 : case TM_Invisible:
3026 :
3027 : /*
3028 : * This can occur when a just inserted tuple is updated again in
3029 : * the same command. E.g. because multiple rows with the same
3030 : * conflicting key values are inserted.
3031 : *
3032 : * This is somewhat similar to the ExecUpdate() TM_SelfModified
3033 : * case. We do not want to proceed because it would lead to the
3034 : * same row being updated a second time in some unspecified order,
3035 : * and in contrast to plain UPDATEs there's no historical behavior
3036 : * to break.
3037 : *
3038 : * It is the user's responsibility to prevent this situation from
3039 : * occurring. These problems are why the SQL standard similarly
3040 : * specifies that for SQL MERGE, an exception must be raised in
3041 : * the event of an attempt to update the same row twice.
3042 : */
3043 28 : xminDatum = slot_getsysattr(existing,
3044 : MinTransactionIdAttributeNumber,
3045 : &isnull);
3046 : Assert(!isnull);
3047 28 : xmin = DatumGetTransactionId(xminDatum);
3048 :
3049 28 : if (TransactionIdIsCurrentTransactionId(xmin))
3050 28 : ereport(ERROR,
3051 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3052 : /* translator: %s is a SQL command name */
3053 : errmsg("%s command cannot affect row a second time",
3054 : isUpdate ? "ON CONFLICT DO UPDATE" : "ON CONFLICT DO SELECT"),
3055 : errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
3056 :
3057 : /* This shouldn't happen */
3058 0 : elog(ERROR, "attempted to lock invisible tuple");
3059 : break;
3060 :
3061 0 : case TM_SelfModified:
3062 :
3063 : /*
3064 : * This state should never be reached. As a dirty snapshot is used
3065 : * to find conflicting tuples, speculative insertion wouldn't have
3066 : * seen this row to conflict with.
3067 : */
3068 0 : elog(ERROR, "unexpected self-updated tuple");
3069 : break;
3070 :
3071 2 : case TM_Updated:
3072 2 : if (IsolationUsesXactSnapshot())
3073 0 : ereport(ERROR,
3074 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3075 : errmsg("could not serialize access due to concurrent update")));
3076 :
3077 : /*
3078 : * Tell caller to try again from the very start.
3079 : *
3080 : * It does not make sense to use the usual EvalPlanQual() style
3081 : * loop here, as the new version of the row might not conflict
3082 : * anymore, or the conflicting tuple has actually been deleted.
3083 : */
3084 2 : ExecClearTuple(existing);
3085 2 : return false;
3086 :
3087 1 : case TM_Deleted:
3088 1 : if (IsolationUsesXactSnapshot())
3089 0 : ereport(ERROR,
3090 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3091 : errmsg("could not serialize access due to concurrent delete")));
3092 :
3093 : /* see TM_Updated case */
3094 1 : ExecClearTuple(existing);
3095 1 : return false;
3096 :
3097 0 : default:
3098 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
3099 : }
3100 :
3101 : /* Success, the tuple is locked. */
3102 2779 : return true;
3103 : }
3104 :
3105 : /*
3106 : * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
3107 : *
3108 : * Try to lock tuple for update as part of speculative insertion. If
3109 : * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
3110 : * (but still lock row, even though it may not satisfy estate's
3111 : * snapshot).
3112 : *
3113 : * Returns true if we're done (with or without an update), or false if
3114 : * the caller must retry the INSERT from scratch.
3115 : */
3116 : static bool
3117 2740 : ExecOnConflictUpdate(ModifyTableContext *context,
3118 : ResultRelInfo *resultRelInfo,
3119 : ItemPointer conflictTid,
3120 : TupleTableSlot *excludedSlot,
3121 : bool canSetTag,
3122 : TupleTableSlot **returning)
3123 : {
3124 2740 : ModifyTableState *mtstate = context->mtstate;
3125 2740 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3126 2740 : Relation relation = resultRelInfo->ri_RelationDesc;
3127 2740 : ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
3128 2740 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
3129 : LockTupleMode lockmode;
3130 :
3131 : /*
3132 : * Parse analysis should have blocked ON CONFLICT for all system
3133 : * relations, which includes these. There's no fundamental obstacle to
3134 : * supporting this; we'd just need to handle LOCKTAG_TUPLE like the other
3135 : * ExecUpdate() caller.
3136 : */
3137 : Assert(!resultRelInfo->ri_needLockTagTuple);
3138 :
3139 : /* Determine lock mode to use */
3140 2740 : lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
3141 :
3142 : /* Lock tuple for update */
3143 2740 : if (!ExecOnConflictLockRow(context, existing, conflictTid,
3144 : resultRelInfo->ri_RelationDesc, lockmode, true))
3145 3 : return false;
3146 :
3147 : /*
3148 : * Verify that the tuple is visible to our MVCC snapshot if the current
3149 : * isolation level mandates that.
3150 : *
3151 : * It's not sufficient to rely on the check within ExecUpdate() as e.g.
3152 : * CONFLICT ... WHERE clause may prevent us from reaching that.
3153 : *
3154 : * This means we only ever continue when a new command in the current
3155 : * transaction could see the row, even though in READ COMMITTED mode the
3156 : * tuple will not be visible according to the current statement's
3157 : * snapshot. This is in line with the way UPDATE deals with newer tuple
3158 : * versions.
3159 : */
3160 2721 : ExecCheckTupleVisible(context->estate, relation, existing);
3161 :
3162 : /*
3163 : * Make tuple and any needed join variables available to ExecQual and
3164 : * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
3165 : * the target's existing tuple is installed in the scantuple. EXCLUDED
3166 : * has been made to reference INNER_VAR in setrefs.c, but there is no
3167 : * other redirection.
3168 : */
3169 2721 : econtext->ecxt_scantuple = existing;
3170 2721 : econtext->ecxt_innertuple = excludedSlot;
3171 2721 : econtext->ecxt_outertuple = NULL;
3172 :
3173 2721 : if (!ExecQual(onConflictSetWhere, econtext))
3174 : {
3175 21 : ExecClearTuple(existing); /* see return below */
3176 21 : InstrCountFiltered1(&mtstate->ps, 1);
3177 21 : return true; /* done with the tuple */
3178 : }
3179 :
3180 2700 : if (resultRelInfo->ri_WithCheckOptions != NIL)
3181 : {
3182 : /*
3183 : * Check target's existing tuple against UPDATE-applicable USING
3184 : * security barrier quals (if any), enforced here as RLS checks/WCOs.
3185 : *
3186 : * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
3187 : * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK.
3188 : * Since SELECT permission on the target table is always required for
3189 : * INSERT ... ON CONFLICT DO UPDATE, the rewriter also adds SELECT RLS
3190 : * checks/WCOs for SELECT security quals, using WCOs of the same kind,
3191 : * and this check enforces them too.
3192 : *
3193 : * The rewriter will also have associated UPDATE-applicable straight
3194 : * RLS checks/WCOs for the benefit of the ExecUpdate() call that
3195 : * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
3196 : * kinds, so there is no danger of spurious over-enforcement in the
3197 : * INSERT or UPDATE path.
3198 : */
3199 48 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
3200 : existing,
3201 : mtstate->ps.state);
3202 : }
3203 :
3204 : /* Project the new tuple version */
3205 2684 : ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
3206 :
3207 : /*
3208 : * Note that it is possible that the target tuple has been modified in
3209 : * this session, after the above table_tuple_lock. We choose to not error
3210 : * out in that case, in line with ExecUpdate's treatment of similar cases.
3211 : * This can happen if an UPDATE is triggered from within ExecQual(),
3212 : * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
3213 : * wCTE in the ON CONFLICT's SET.
3214 : */
3215 :
3216 : /* Execute UPDATE with projection */
3217 5348 : *returning = ExecUpdate(context, resultRelInfo,
3218 : conflictTid, NULL, existing,
3219 2684 : resultRelInfo->ri_onConflict->oc_ProjSlot,
3220 : canSetTag);
3221 :
3222 : /*
3223 : * Clear out existing tuple, as there might not be another conflict among
3224 : * the next input rows. Don't want to hold resources till the end of the
3225 : * query. First though, make sure that the returning slot, if any, has a
3226 : * local copy of any OLD pass-by-reference values, if it refers to any OLD
3227 : * columns.
3228 : */
3229 2664 : if (*returning != NULL &&
3230 153 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
3231 8 : ExecMaterializeSlot(*returning);
3232 :
3233 2664 : ExecClearTuple(existing);
3234 :
3235 2664 : return true;
3236 : }
3237 :
3238 : /*
3239 : * ExecOnConflictSelect --- execute SELECT of INSERT ON CONFLICT DO SELECT
3240 : *
3241 : * If SELECT FOR UPDATE/SHARE is specified, try to lock tuple as part of
3242 : * speculative insertion. If a qual originating from ON CONFLICT DO SELECT is
3243 : * satisfied, select (but still lock row, even though it may not satisfy
3244 : * estate's snapshot).
3245 : *
3246 : * Returns true if we're done (with or without a select), or false if the
3247 : * caller must retry the INSERT from scratch.
3248 : */
3249 : static bool
3250 192 : ExecOnConflictSelect(ModifyTableContext *context,
3251 : ResultRelInfo *resultRelInfo,
3252 : ItemPointer conflictTid,
3253 : TupleTableSlot *excludedSlot,
3254 : bool canSetTag,
3255 : TupleTableSlot **returning)
3256 : {
3257 192 : ModifyTableState *mtstate = context->mtstate;
3258 192 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3259 192 : Relation relation = resultRelInfo->ri_RelationDesc;
3260 192 : ExprState *onConflictSelectWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
3261 192 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
3262 192 : LockClauseStrength lockStrength = resultRelInfo->ri_onConflict->oc_LockStrength;
3263 :
3264 : /*
3265 : * Parse analysis should have blocked ON CONFLICT for all system
3266 : * relations, which includes these. There's no fundamental obstacle to
3267 : * supporting this; we'd just need to handle LOCKTAG_TUPLE appropriately.
3268 : */
3269 : Assert(!resultRelInfo->ri_needLockTagTuple);
3270 :
3271 : /* Fetch/lock existing tuple, according to the requested lock strength */
3272 192 : if (lockStrength == LCS_NONE)
3273 : {
3274 122 : if (!table_tuple_fetch_row_version(relation,
3275 : conflictTid,
3276 : SnapshotAny,
3277 : existing))
3278 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
3279 : }
3280 : else
3281 : {
3282 : LockTupleMode lockmode;
3283 :
3284 70 : switch (lockStrength)
3285 : {
3286 1 : case LCS_FORKEYSHARE:
3287 1 : lockmode = LockTupleKeyShare;
3288 1 : break;
3289 1 : case LCS_FORSHARE:
3290 1 : lockmode = LockTupleShare;
3291 1 : break;
3292 1 : case LCS_FORNOKEYUPDATE:
3293 1 : lockmode = LockTupleNoKeyExclusive;
3294 1 : break;
3295 67 : case LCS_FORUPDATE:
3296 67 : lockmode = LockTupleExclusive;
3297 67 : break;
3298 0 : default:
3299 0 : elog(ERROR, "Unexpected lock strength %d", (int) lockStrength);
3300 : }
3301 :
3302 70 : if (!ExecOnConflictLockRow(context, existing, conflictTid,
3303 : resultRelInfo->ri_RelationDesc, lockmode, false))
3304 0 : return false;
3305 : }
3306 :
3307 : /*
3308 : * Verify that the tuple is visible to our MVCC snapshot if the current
3309 : * isolation level mandates that. See comments in ExecOnConflictUpdate().
3310 : */
3311 180 : ExecCheckTupleVisible(context->estate, relation, existing);
3312 :
3313 : /*
3314 : * Make tuple and any needed join variables available to ExecQual. The
3315 : * EXCLUDED tuple is installed in ecxt_innertuple, while the target's
3316 : * existing tuple is installed in the scantuple. EXCLUDED has been made
3317 : * to reference INNER_VAR in setrefs.c, but there is no other redirection.
3318 : */
3319 180 : econtext->ecxt_scantuple = existing;
3320 180 : econtext->ecxt_innertuple = excludedSlot;
3321 180 : econtext->ecxt_outertuple = NULL;
3322 :
3323 180 : if (!ExecQual(onConflictSelectWhere, econtext))
3324 : {
3325 24 : ExecClearTuple(existing); /* see return below */
3326 24 : InstrCountFiltered1(&mtstate->ps, 1);
3327 24 : return true; /* done with the tuple */
3328 : }
3329 :
3330 156 : if (resultRelInfo->ri_WithCheckOptions != NIL)
3331 : {
3332 : /*
3333 : * Check target's existing tuple against SELECT-applicable USING
3334 : * security barrier quals (if any), enforced here as RLS checks/WCOs.
3335 : *
3336 : * The rewriter creates WCOs from the USING quals of SELECT policies,
3337 : * and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK. If FOR
3338 : * UPDATE/SHARE was specified, UPDATE permissions are required on the
3339 : * target table, and the rewriter also adds WCOs built from the USING
3340 : * quals of UPDATE policies, using WCOs of the same kind, and this
3341 : * check enforces them too.
3342 : */
3343 24 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
3344 : existing,
3345 : mtstate->ps.state);
3346 : }
3347 :
3348 : /* RETURNING is required for DO SELECT */
3349 : Assert(resultRelInfo->ri_projectReturning);
3350 :
3351 152 : *returning = ExecProcessReturning(context, resultRelInfo, false,
3352 : existing, existing, context->planSlot);
3353 :
3354 152 : if (canSetTag)
3355 152 : context->estate->es_processed++;
3356 :
3357 : /*
3358 : * Before releasing the existing tuple, make sure that the returning slot
3359 : * has a local copy of any pass-by-reference values.
3360 : */
3361 152 : ExecMaterializeSlot(*returning);
3362 :
3363 : /*
3364 : * Clear out existing tuple, as there might not be another conflict among
3365 : * the next input rows. Don't want to hold resources till the end of the
3366 : * query.
3367 : */
3368 152 : ExecClearTuple(existing);
3369 :
3370 152 : return true;
3371 : }
3372 :
3373 : /*
3374 : * Perform MERGE.
3375 : */
3376 : static TupleTableSlot *
3377 10260 : ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3378 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
3379 : {
3380 10260 : TupleTableSlot *rslot = NULL;
3381 : bool matched;
3382 :
3383 : /*-----
3384 : * If we are dealing with a WHEN MATCHED case, tupleid or oldtuple is
3385 : * valid, depending on whether the result relation is a table or a view.
3386 : * We execute the first action for which the additional WHEN MATCHED AND
3387 : * quals pass. If an action without quals is found, that action is
3388 : * executed.
3389 : *
3390 : * Similarly, in the WHEN NOT MATCHED BY SOURCE case, tupleid or oldtuple
3391 : * is valid, and we look at the given WHEN NOT MATCHED BY SOURCE actions
3392 : * in sequence until one passes. This is almost identical to the WHEN
3393 : * MATCHED case, and both cases are handled by ExecMergeMatched().
3394 : *
3395 : * Finally, in the WHEN NOT MATCHED [BY TARGET] case, both tupleid and
3396 : * oldtuple are invalid, and we look at the given WHEN NOT MATCHED [BY
3397 : * TARGET] actions in sequence until one passes.
3398 : *
3399 : * Things get interesting in case of concurrent update/delete of the
3400 : * target tuple. Such concurrent update/delete is detected while we are
3401 : * executing a WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action.
3402 : *
3403 : * A concurrent update can:
3404 : *
3405 : * 1. modify the target tuple so that the results from checking any
3406 : * additional quals attached to WHEN MATCHED or WHEN NOT MATCHED BY
3407 : * SOURCE actions potentially change, but the result from the join
3408 : * quals does not change.
3409 : *
3410 : * In this case, we are still dealing with the same kind of match
3411 : * (MATCHED or NOT MATCHED BY SOURCE). We recheck the same list of
3412 : * actions from the start and choose the first one that satisfies the
3413 : * new target tuple.
3414 : *
3415 : * 2. modify the target tuple in the WHEN MATCHED case so that the join
3416 : * quals no longer pass and hence the source and target tuples no
3417 : * longer match.
3418 : *
3419 : * In this case, we are now dealing with a NOT MATCHED case, and we
3420 : * process both WHEN NOT MATCHED BY SOURCE and WHEN NOT MATCHED [BY
3421 : * TARGET] actions. First ExecMergeMatched() processes the list of
3422 : * WHEN NOT MATCHED BY SOURCE actions in sequence until one passes,
3423 : * then ExecMergeNotMatched() processes any WHEN NOT MATCHED [BY
3424 : * TARGET] actions in sequence until one passes. Thus we may execute
3425 : * two actions; one of each kind.
3426 : *
3427 : * Thus we support concurrent updates that turn MATCHED candidate rows
3428 : * into NOT MATCHED rows. However, we do not attempt to support cases
3429 : * that would turn NOT MATCHED rows into MATCHED rows, or which would
3430 : * cause a target row to match a different source row.
3431 : *
3432 : * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED
3433 : * [BY TARGET].
3434 : *
3435 : * ExecMergeMatched() takes care of following the update chain and
3436 : * re-finding the qualifying WHEN MATCHED or WHEN NOT MATCHED BY SOURCE
3437 : * action, as long as the target tuple still exists. If the target tuple
3438 : * gets deleted or a concurrent update causes the join quals to fail, it
3439 : * returns a matched status of false and we call ExecMergeNotMatched().
3440 : * Given that ExecMergeMatched() always makes progress by following the
3441 : * update chain and we never switch from ExecMergeNotMatched() to
3442 : * ExecMergeMatched(), there is no risk of a livelock.
3443 : */
3444 10260 : matched = tupleid != NULL || oldtuple != NULL;
3445 10260 : if (matched)
3446 8469 : rslot = ExecMergeMatched(context, resultRelInfo, tupleid, oldtuple,
3447 : canSetTag, &matched);
3448 :
3449 : /*
3450 : * Deal with the NOT MATCHED case (either a NOT MATCHED tuple from the
3451 : * join, or a previously MATCHED tuple for which ExecMergeMatched() set
3452 : * "matched" to false, indicating that it no longer matches).
3453 : */
3454 10198 : if (!matched)
3455 : {
3456 : /*
3457 : * If a concurrent update turned a MATCHED case into a NOT MATCHED
3458 : * case, and we have both WHEN NOT MATCHED BY SOURCE and WHEN NOT
3459 : * MATCHED [BY TARGET] actions, and there is a RETURNING clause,
3460 : * ExecMergeMatched() may have already executed a WHEN NOT MATCHED BY
3461 : * SOURCE action, and computed the row to return. If so, we cannot
3462 : * execute a WHEN NOT MATCHED [BY TARGET] action now, so mark it as
3463 : * pending (to be processed on the next call to ExecModifyTable()).
3464 : * Otherwise, just process the action now.
3465 : */
3466 1800 : if (rslot == NULL)
3467 1798 : rslot = ExecMergeNotMatched(context, resultRelInfo, canSetTag);
3468 : else
3469 2 : context->mtstate->mt_merge_pending_not_matched = context->planSlot;
3470 : }
3471 :
3472 10159 : return rslot;
3473 : }
3474 :
3475 : /*
3476 : * Check and execute the first qualifying MATCHED or NOT MATCHED BY SOURCE
3477 : * action, depending on whether the join quals are satisfied. If the target
3478 : * relation is a table, the current target tuple is identified by tupleid.
3479 : * Otherwise, if the target relation is a view, oldtuple is the current target
3480 : * tuple from the view.
3481 : *
3482 : * We start from the first WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action
3483 : * and check if the WHEN quals pass, if any. If the WHEN quals for the first
3484 : * action do not pass, we check the second, then the third and so on. If we
3485 : * reach the end without finding a qualifying action, we return NULL.
3486 : * Otherwise, we execute the qualifying action and return its RETURNING
3487 : * result, if any, or NULL.
3488 : *
3489 : * On entry, "*matched" is assumed to be true. If a concurrent update or
3490 : * delete is detected that causes the join quals to no longer pass, we set it
3491 : * to false, indicating that the caller should process any NOT MATCHED [BY
3492 : * TARGET] actions.
3493 : *
3494 : * After a concurrent update, we restart from the first action to look for a
3495 : * new qualifying action to execute. If the join quals originally passed, and
3496 : * the concurrent update caused them to no longer pass, then we switch from
3497 : * the MATCHED to the NOT MATCHED BY SOURCE list of actions before restarting
3498 : * (and setting "*matched" to false). As a result we may execute a WHEN NOT
3499 : * MATCHED BY SOURCE action, and set "*matched" to false, causing the caller
3500 : * to also execute a WHEN NOT MATCHED [BY TARGET] action.
3501 : */
3502 : static TupleTableSlot *
3503 8469 : ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3504 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag,
3505 : bool *matched)
3506 : {
3507 8469 : ModifyTableState *mtstate = context->mtstate;
3508 8469 : List **mergeActions = resultRelInfo->ri_MergeActions;
3509 : ItemPointerData lockedtid;
3510 : List *actionStates;
3511 8469 : TupleTableSlot *newslot = NULL;
3512 8469 : TupleTableSlot *rslot = NULL;
3513 8469 : EState *estate = context->estate;
3514 8469 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3515 : bool isNull;
3516 8469 : EPQState *epqstate = &mtstate->mt_epqstate;
3517 : ListCell *l;
3518 :
3519 : /* Expect matched to be true on entry */
3520 : Assert(*matched);
3521 :
3522 : /*
3523 : * If there are no WHEN MATCHED or WHEN NOT MATCHED BY SOURCE actions, we
3524 : * are done.
3525 : */
3526 8469 : if (mergeActions[MERGE_WHEN_MATCHED] == NIL &&
3527 780 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] == NIL)
3528 332 : return NULL;
3529 :
3530 : /*
3531 : * Make tuple and any needed join variables available to ExecQual and
3532 : * ExecProject. The target's existing tuple is installed in the scantuple.
3533 : * This target relation's slot is required only in the case of a MATCHED
3534 : * or NOT MATCHED BY SOURCE tuple and UPDATE/DELETE actions.
3535 : */
3536 8137 : econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
3537 8137 : econtext->ecxt_innertuple = context->planSlot;
3538 8137 : econtext->ecxt_outertuple = NULL;
3539 :
3540 : /*
3541 : * This routine is only invoked for matched target rows, so we should
3542 : * either have the tupleid of the target row, or an old tuple from the
3543 : * target wholerow junk attr.
3544 : */
3545 : Assert(tupleid != NULL || oldtuple != NULL);
3546 8137 : ItemPointerSetInvalid(&lockedtid);
3547 8137 : if (oldtuple != NULL)
3548 : {
3549 : Assert(!resultRelInfo->ri_needLockTagTuple);
3550 64 : ExecForceStoreHeapTuple(oldtuple, resultRelInfo->ri_oldTupleSlot,
3551 : false);
3552 : }
3553 : else
3554 : {
3555 8073 : if (resultRelInfo->ri_needLockTagTuple)
3556 : {
3557 : /*
3558 : * This locks even for CMD_DELETE, for CMD_NOTHING, and for tuples
3559 : * that don't match mas_whenqual. MERGE on system catalogs is a
3560 : * minor use case, so don't bother optimizing those.
3561 : */
3562 5456 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3563 : InplaceUpdateTupleLock);
3564 5456 : lockedtid = *tupleid;
3565 : }
3566 8073 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
3567 : tupleid,
3568 : SnapshotAny,
3569 : resultRelInfo->ri_oldTupleSlot))
3570 0 : elog(ERROR, "failed to fetch the target tuple");
3571 : }
3572 :
3573 : /*
3574 : * Test the join condition. If it's satisfied, perform a MATCHED action.
3575 : * Otherwise, perform a NOT MATCHED BY SOURCE action.
3576 : *
3577 : * Note that this join condition will be NULL if there are no NOT MATCHED
3578 : * BY SOURCE actions --- see transform_MERGE_to_join(). In that case, we
3579 : * need only consider MATCHED actions here.
3580 : */
3581 8137 : if (ExecQual(resultRelInfo->ri_MergeJoinCondition, econtext))
3582 8015 : actionStates = mergeActions[MERGE_WHEN_MATCHED];
3583 : else
3584 122 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3585 :
3586 8137 : lmerge_matched:
3587 :
3588 14600 : foreach(l, actionStates)
3589 : {
3590 8220 : MergeActionState *relaction = (MergeActionState *) lfirst(l);
3591 8220 : CmdType commandType = relaction->mas_action->commandType;
3592 : TM_Result result;
3593 8220 : UpdateContext updateCxt = {0};
3594 :
3595 : /*
3596 : * Test condition, if any.
3597 : *
3598 : * In the absence of any condition, we perform the action
3599 : * unconditionally (no need to check separately since ExecQual() will
3600 : * return true if there are no conditions to evaluate).
3601 : */
3602 8220 : if (!ExecQual(relaction->mas_whenqual, econtext))
3603 6423 : continue;
3604 :
3605 : /*
3606 : * Check if the existing target tuple meets the USING checks of
3607 : * UPDATE/DELETE RLS policies. If those checks fail, we throw an
3608 : * error.
3609 : *
3610 : * The WITH CHECK quals for UPDATE RLS policies are applied in
3611 : * ExecUpdateAct() and hence we need not do anything special to handle
3612 : * them.
3613 : *
3614 : * NOTE: We must do this after WHEN quals are evaluated, so that we
3615 : * check policies only when they matter.
3616 : */
3617 1797 : if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
3618 : {
3619 76 : ExecWithCheckOptions(commandType == CMD_UPDATE ?
3620 : WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
3621 : resultRelInfo,
3622 : resultRelInfo->ri_oldTupleSlot,
3623 76 : context->mtstate->ps.state);
3624 : }
3625 :
3626 : /* Perform stated action */
3627 1781 : switch (commandType)
3628 : {
3629 1413 : case CMD_UPDATE:
3630 :
3631 : /*
3632 : * Project the output tuple, and use that to update the table.
3633 : * We don't need to filter out junk attributes, because the
3634 : * UPDATE action's targetlist doesn't have any.
3635 : */
3636 1413 : newslot = ExecProject(relaction->mas_proj);
3637 :
3638 1413 : mtstate->mt_merge_action = relaction;
3639 1413 : if (!ExecUpdatePrologue(context, resultRelInfo,
3640 : tupleid, NULL, newslot, &result))
3641 : {
3642 11 : if (result == TM_Ok)
3643 102 : goto out; /* "do nothing" */
3644 :
3645 7 : break; /* concurrent update/delete */
3646 : }
3647 :
3648 : /* INSTEAD OF ROW UPDATE Triggers */
3649 1402 : if (resultRelInfo->ri_TrigDesc &&
3650 230 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
3651 : {
3652 52 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
3653 : oldtuple, newslot))
3654 0 : goto out; /* "do nothing" */
3655 : }
3656 : else
3657 : {
3658 : /* checked ri_needLockTagTuple above */
3659 : Assert(oldtuple == NULL);
3660 :
3661 1350 : result = ExecUpdateAct(context, resultRelInfo, tupleid,
3662 : NULL, newslot, canSetTag,
3663 : &updateCxt);
3664 :
3665 : /*
3666 : * As in ExecUpdate(), if ExecUpdateAct() reports that a
3667 : * cross-partition update was done, then there's nothing
3668 : * else for us to do --- the UPDATE has been turned into a
3669 : * DELETE and an INSERT, and we must not perform any of
3670 : * the usual post-update tasks. Also, the RETURNING tuple
3671 : * (if any) has been projected, so we can just return
3672 : * that.
3673 : */
3674 1335 : if (updateCxt.crossPartUpdate)
3675 : {
3676 89 : mtstate->mt_merge_updated += 1;
3677 89 : rslot = context->cpUpdateReturningSlot;
3678 89 : goto out;
3679 : }
3680 : }
3681 :
3682 1298 : if (result == TM_Ok)
3683 : {
3684 1251 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
3685 : tupleid, NULL, newslot);
3686 1243 : mtstate->mt_merge_updated += 1;
3687 : }
3688 1290 : break;
3689 :
3690 348 : case CMD_DELETE:
3691 348 : mtstate->mt_merge_action = relaction;
3692 348 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
3693 : NULL, NULL, &result))
3694 : {
3695 7 : if (result == TM_Ok)
3696 4 : goto out; /* "do nothing" */
3697 :
3698 3 : break; /* concurrent update/delete */
3699 : }
3700 :
3701 : /* INSTEAD OF ROW DELETE Triggers */
3702 341 : if (resultRelInfo->ri_TrigDesc &&
3703 37 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
3704 : {
3705 4 : if (!ExecIRDeleteTriggers(estate, resultRelInfo,
3706 : oldtuple))
3707 0 : goto out; /* "do nothing" */
3708 : }
3709 : else
3710 : {
3711 : /* checked ri_needLockTagTuple above */
3712 : Assert(oldtuple == NULL);
3713 :
3714 337 : result = ExecDeleteAct(context, resultRelInfo, tupleid,
3715 : false);
3716 : }
3717 :
3718 341 : if (result == TM_Ok)
3719 : {
3720 330 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
3721 : false);
3722 330 : mtstate->mt_merge_deleted += 1;
3723 : }
3724 341 : break;
3725 :
3726 20 : case CMD_NOTHING:
3727 : /* Doing nothing is always OK */
3728 20 : result = TM_Ok;
3729 20 : break;
3730 :
3731 0 : default:
3732 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3733 : }
3734 :
3735 1661 : switch (result)
3736 : {
3737 1593 : case TM_Ok:
3738 : /* all good; perform final actions */
3739 1593 : if (canSetTag && commandType != CMD_NOTHING)
3740 1559 : (estate->es_processed)++;
3741 :
3742 1593 : break;
3743 :
3744 21 : case TM_SelfModified:
3745 :
3746 : /*
3747 : * The target tuple was already updated or deleted by the
3748 : * current command, or by a later command in the current
3749 : * transaction. The former case is explicitly disallowed by
3750 : * the SQL standard for MERGE, which insists that the MERGE
3751 : * join condition should not join a target row to more than
3752 : * one source row.
3753 : *
3754 : * The latter case arises if the tuple is modified by a
3755 : * command in a BEFORE trigger, or perhaps by a command in a
3756 : * volatile function used in the query. In such situations we
3757 : * should not ignore the MERGE action, but it is equally
3758 : * unsafe to proceed. We don't want to discard the original
3759 : * MERGE action while keeping the triggered actions based on
3760 : * it; and it would be no better to allow the original MERGE
3761 : * action while discarding the updates that it triggered. So
3762 : * throwing an error is the only safe course.
3763 : */
3764 21 : if (context->tmfd.cmax != estate->es_output_cid)
3765 8 : ereport(ERROR,
3766 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3767 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3768 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3769 :
3770 13 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3771 13 : ereport(ERROR,
3772 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3773 : /* translator: %s is a SQL command name */
3774 : errmsg("%s command cannot affect row a second time",
3775 : "MERGE"),
3776 : errhint("Ensure that not more than one source row matches any one target row.")));
3777 :
3778 : /* This shouldn't happen */
3779 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3780 : break;
3781 :
3782 5 : case TM_Deleted:
3783 5 : if (IsolationUsesXactSnapshot())
3784 0 : ereport(ERROR,
3785 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3786 : errmsg("could not serialize access due to concurrent delete")));
3787 :
3788 : /*
3789 : * If the tuple was already deleted, set matched to false to
3790 : * let caller handle it under NOT MATCHED [BY TARGET] clauses.
3791 : */
3792 5 : *matched = false;
3793 5 : goto out;
3794 :
3795 42 : case TM_Updated:
3796 : {
3797 : bool was_matched;
3798 : Relation resultRelationDesc;
3799 : TupleTableSlot *epqslot,
3800 : *inputslot;
3801 : LockTupleMode lockmode;
3802 :
3803 42 : if (IsolationUsesXactSnapshot())
3804 1 : ereport(ERROR,
3805 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3806 : errmsg("could not serialize access due to concurrent update")));
3807 :
3808 : /*
3809 : * The target tuple was concurrently updated by some other
3810 : * transaction. If we are currently processing a MATCHED
3811 : * action, use EvalPlanQual() with the new version of the
3812 : * tuple and recheck the join qual, to detect a change
3813 : * from the MATCHED to the NOT MATCHED cases. If we are
3814 : * already processing a NOT MATCHED BY SOURCE action, we
3815 : * skip this (cannot switch from NOT MATCHED BY SOURCE to
3816 : * MATCHED).
3817 : */
3818 41 : was_matched = relaction->mas_action->matchKind == MERGE_WHEN_MATCHED;
3819 41 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
3820 41 : lockmode = ExecUpdateLockMode(estate, resultRelInfo);
3821 :
3822 41 : if (was_matched)
3823 41 : inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
3824 : resultRelInfo->ri_RangeTableIndex);
3825 : else
3826 0 : inputslot = resultRelInfo->ri_oldTupleSlot;
3827 :
3828 41 : result = table_tuple_lock(resultRelationDesc, tupleid,
3829 : estate->es_snapshot,
3830 : inputslot, estate->es_output_cid,
3831 : lockmode, LockWaitBlock,
3832 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
3833 : &context->tmfd);
3834 41 : switch (result)
3835 : {
3836 40 : case TM_Ok:
3837 :
3838 : /*
3839 : * If the tuple was updated and migrated to
3840 : * another partition concurrently, the current
3841 : * MERGE implementation can't follow. There's
3842 : * probably a better way to handle this case, but
3843 : * it'd require recognizing the relation to which
3844 : * the tuple moved, and setting our current
3845 : * resultRelInfo to that.
3846 : */
3847 40 : if (ItemPointerIndicatesMovedPartitions(tupleid))
3848 0 : ereport(ERROR,
3849 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3850 : errmsg("tuple to be merged was already moved to another partition due to concurrent update")));
3851 :
3852 : /*
3853 : * If this was a MATCHED case, use EvalPlanQual()
3854 : * to recheck the join condition.
3855 : */
3856 40 : if (was_matched)
3857 : {
3858 40 : epqslot = EvalPlanQual(epqstate,
3859 : resultRelationDesc,
3860 : resultRelInfo->ri_RangeTableIndex,
3861 : inputslot);
3862 :
3863 : /*
3864 : * If the subplan didn't return a tuple, then
3865 : * we must be dealing with an inner join for
3866 : * which the join condition no longer matches.
3867 : * This can only happen if there are no NOT
3868 : * MATCHED actions, and so there is nothing
3869 : * more to do.
3870 : */
3871 40 : if (TupIsNull(epqslot))
3872 0 : goto out;
3873 :
3874 : /*
3875 : * If we got a NULL ctid from the subplan, the
3876 : * join quals no longer pass and we switch to
3877 : * the NOT MATCHED BY SOURCE case.
3878 : */
3879 40 : (void) ExecGetJunkAttribute(epqslot,
3880 40 : resultRelInfo->ri_RowIdAttNo,
3881 : &isNull);
3882 40 : if (isNull)
3883 2 : *matched = false;
3884 :
3885 : /*
3886 : * Otherwise, recheck the join quals to see if
3887 : * we need to switch to the NOT MATCHED BY
3888 : * SOURCE case.
3889 : */
3890 40 : if (resultRelInfo->ri_needLockTagTuple)
3891 : {
3892 1 : if (ItemPointerIsValid(&lockedtid))
3893 1 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3894 : InplaceUpdateTupleLock);
3895 1 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3896 : InplaceUpdateTupleLock);
3897 1 : lockedtid = *tupleid;
3898 : }
3899 :
3900 40 : if (!table_tuple_fetch_row_version(resultRelationDesc,
3901 : tupleid,
3902 : SnapshotAny,
3903 : resultRelInfo->ri_oldTupleSlot))
3904 0 : elog(ERROR, "failed to fetch the target tuple");
3905 :
3906 40 : if (*matched)
3907 38 : *matched = ExecQual(resultRelInfo->ri_MergeJoinCondition,
3908 : econtext);
3909 :
3910 : /* Switch lists, if necessary */
3911 40 : if (!*matched)
3912 : {
3913 4 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3914 :
3915 : /*
3916 : * If we have both NOT MATCHED BY SOURCE
3917 : * and NOT MATCHED BY TARGET actions (a
3918 : * full join between the source and target
3919 : * relations), the single previously
3920 : * matched tuple from the outer plan node
3921 : * is treated as two not matched tuples,
3922 : * in the same way as if they had not
3923 : * matched to start with. Therefore, we
3924 : * must adjust the outer plan node's tuple
3925 : * count, if we're instrumenting the
3926 : * query, to get the correct "skipped" row
3927 : * count --- see show_modifytable_info().
3928 : */
3929 4 : if (outerPlanState(mtstate)->instrument &&
3930 1 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] &&
3931 1 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET])
3932 1 : InstrUpdateTupleCount(outerPlanState(mtstate)->instrument, 1.0);
3933 : }
3934 : }
3935 :
3936 : /*
3937 : * Loop back and process the MATCHED or NOT
3938 : * MATCHED BY SOURCE actions from the start.
3939 : */
3940 40 : goto lmerge_matched;
3941 :
3942 0 : case TM_Deleted:
3943 :
3944 : /*
3945 : * tuple already deleted; tell caller to run NOT
3946 : * MATCHED [BY TARGET] actions
3947 : */
3948 0 : *matched = false;
3949 0 : goto out;
3950 :
3951 1 : case TM_SelfModified:
3952 :
3953 : /*
3954 : * This can be reached when following an update
3955 : * chain from a tuple updated by another session,
3956 : * reaching a tuple that was already updated or
3957 : * deleted by the current command, or by a later
3958 : * command in the current transaction. As above,
3959 : * this should always be treated as an error.
3960 : */
3961 1 : if (context->tmfd.cmax != estate->es_output_cid)
3962 0 : ereport(ERROR,
3963 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3964 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3965 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3966 :
3967 1 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3968 1 : ereport(ERROR,
3969 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3970 : /* translator: %s is a SQL command name */
3971 : errmsg("%s command cannot affect row a second time",
3972 : "MERGE"),
3973 : errhint("Ensure that not more than one source row matches any one target row.")));
3974 :
3975 : /* This shouldn't happen */
3976 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3977 : goto out;
3978 :
3979 0 : default:
3980 : /* see table_tuple_lock call in ExecDelete() */
3981 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
3982 : result);
3983 : goto out;
3984 : }
3985 : }
3986 :
3987 0 : case TM_Invisible:
3988 : case TM_WouldBlock:
3989 : case TM_BeingModified:
3990 : /* these should not occur */
3991 0 : elog(ERROR, "unexpected tuple operation result: %d", result);
3992 : break;
3993 : }
3994 :
3995 : /* Process RETURNING if present */
3996 1593 : if (resultRelInfo->ri_projectReturning)
3997 : {
3998 288 : switch (commandType)
3999 : {
4000 124 : case CMD_UPDATE:
4001 124 : rslot = ExecProcessReturning(context,
4002 : resultRelInfo,
4003 : false,
4004 : resultRelInfo->ri_oldTupleSlot,
4005 : newslot,
4006 : context->planSlot);
4007 124 : break;
4008 :
4009 164 : case CMD_DELETE:
4010 164 : rslot = ExecProcessReturning(context,
4011 : resultRelInfo,
4012 : true,
4013 : resultRelInfo->ri_oldTupleSlot,
4014 : NULL,
4015 : context->planSlot);
4016 164 : break;
4017 :
4018 0 : case CMD_NOTHING:
4019 0 : break;
4020 :
4021 0 : default:
4022 0 : elog(ERROR, "unrecognized commandType: %d",
4023 : (int) commandType);
4024 : }
4025 : }
4026 :
4027 : /*
4028 : * We've activated one of the WHEN clauses, so we don't search
4029 : * further. This is required behaviour, not an optimization.
4030 : */
4031 1593 : break;
4032 : }
4033 :
4034 : /*
4035 : * Successfully executed an action or no qualifying action was found.
4036 : */
4037 8075 : out:
4038 8075 : if (ItemPointerIsValid(&lockedtid))
4039 5456 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
4040 : InplaceUpdateTupleLock);
4041 8075 : return rslot;
4042 : }
4043 :
4044 : /*
4045 : * Execute the first qualifying NOT MATCHED [BY TARGET] action.
4046 : */
4047 : static TupleTableSlot *
4048 1800 : ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
4049 : bool canSetTag)
4050 : {
4051 1800 : ModifyTableState *mtstate = context->mtstate;
4052 1800 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
4053 : List *actionStates;
4054 1800 : TupleTableSlot *rslot = NULL;
4055 : ListCell *l;
4056 :
4057 : /*
4058 : * For INSERT actions, the root relation's merge action is OK since the
4059 : * INSERT's targetlist and the WHEN conditions can only refer to the
4060 : * source relation and hence it does not matter which result relation we
4061 : * work with.
4062 : *
4063 : * XXX does this mean that we can avoid creating copies of actionStates on
4064 : * partitioned tables, for not-matched actions?
4065 : */
4066 1800 : actionStates = resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET];
4067 :
4068 : /*
4069 : * Make source tuple available to ExecQual and ExecProject. We don't need
4070 : * the target tuple, since the WHEN quals and targetlist can't refer to
4071 : * the target columns.
4072 : */
4073 1800 : econtext->ecxt_scantuple = NULL;
4074 1800 : econtext->ecxt_innertuple = context->planSlot;
4075 1800 : econtext->ecxt_outertuple = NULL;
4076 :
4077 2380 : foreach(l, actionStates)
4078 : {
4079 1800 : MergeActionState *action = (MergeActionState *) lfirst(l);
4080 1800 : CmdType commandType = action->mas_action->commandType;
4081 : TupleTableSlot *newslot;
4082 :
4083 : /*
4084 : * Test condition, if any.
4085 : *
4086 : * In the absence of any condition, we perform the action
4087 : * unconditionally (no need to check separately since ExecQual() will
4088 : * return true if there are no conditions to evaluate).
4089 : */
4090 1800 : if (!ExecQual(action->mas_whenqual, econtext))
4091 580 : continue;
4092 :
4093 : /* Perform stated action */
4094 1220 : switch (commandType)
4095 : {
4096 1220 : case CMD_INSERT:
4097 :
4098 : /*
4099 : * Project the tuple. In case of a partitioned table, the
4100 : * projection was already built to use the root's descriptor,
4101 : * so we don't need to map the tuple here.
4102 : */
4103 1220 : newslot = ExecProject(action->mas_proj);
4104 1220 : mtstate->mt_merge_action = action;
4105 :
4106 1220 : rslot = ExecInsert(context, mtstate->rootResultRelInfo,
4107 : newslot, canSetTag, NULL, NULL);
4108 1181 : mtstate->mt_merge_inserted += 1;
4109 1181 : break;
4110 0 : case CMD_NOTHING:
4111 : /* Do nothing */
4112 0 : break;
4113 0 : default:
4114 0 : elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
4115 : }
4116 :
4117 : /*
4118 : * We've activated one of the WHEN clauses, so we don't search
4119 : * further. This is required behaviour, not an optimization.
4120 : */
4121 1181 : break;
4122 : }
4123 :
4124 1761 : return rslot;
4125 : }
4126 :
4127 : /*
4128 : * Initialize state for execution of MERGE.
4129 : */
4130 : void
4131 1055 : ExecInitMerge(ModifyTableState *mtstate, EState *estate)
4132 : {
4133 1055 : List *mergeActionLists = mtstate->mt_mergeActionLists;
4134 1055 : List *mergeJoinConditions = mtstate->mt_mergeJoinConditions;
4135 1055 : ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
4136 : ResultRelInfo *resultRelInfo;
4137 : ExprContext *econtext;
4138 : ListCell *lc;
4139 : int i;
4140 :
4141 1055 : if (mergeActionLists == NIL)
4142 0 : return;
4143 :
4144 1055 : mtstate->mt_merge_subcommands = 0;
4145 :
4146 1055 : if (mtstate->ps.ps_ExprContext == NULL)
4147 863 : ExecAssignExprContext(estate, &mtstate->ps);
4148 1055 : econtext = mtstate->ps.ps_ExprContext;
4149 :
4150 : /*
4151 : * Create a MergeActionState for each action on the mergeActionList and
4152 : * add it to either a list of matched actions or not-matched actions.
4153 : *
4154 : * Similar logic appears in ExecInitPartitionInfo(), so if changing
4155 : * anything here, do so there too.
4156 : */
4157 1055 : i = 0;
4158 2266 : foreach(lc, mergeActionLists)
4159 : {
4160 1211 : List *mergeActionList = lfirst(lc);
4161 : Node *joinCondition;
4162 : TupleDesc relationDesc;
4163 : ListCell *l;
4164 :
4165 1211 : joinCondition = (Node *) list_nth(mergeJoinConditions, i);
4166 1211 : resultRelInfo = mtstate->resultRelInfo + i;
4167 1211 : i++;
4168 1211 : relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
4169 :
4170 : /* initialize slots for MERGE fetches from this rel */
4171 1211 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4172 1211 : ExecInitMergeTupleSlots(mtstate, resultRelInfo);
4173 :
4174 : /* initialize state for join condition checking */
4175 1211 : resultRelInfo->ri_MergeJoinCondition =
4176 1211 : ExecInitQual((List *) joinCondition, &mtstate->ps);
4177 :
4178 3319 : foreach(l, mergeActionList)
4179 : {
4180 2108 : MergeAction *action = (MergeAction *) lfirst(l);
4181 : MergeActionState *action_state;
4182 : TupleTableSlot *tgtslot;
4183 : TupleDesc tgtdesc;
4184 :
4185 : /*
4186 : * Build action merge state for this rel. (For partitions,
4187 : * equivalent code exists in ExecInitPartitionInfo.)
4188 : */
4189 2108 : action_state = makeNode(MergeActionState);
4190 2108 : action_state->mas_action = action;
4191 2108 : action_state->mas_whenqual = ExecInitQual((List *) action->qual,
4192 : &mtstate->ps);
4193 :
4194 : /*
4195 : * We create three lists - one for each MergeMatchKind - and stick
4196 : * the MergeActionState into the appropriate list.
4197 : */
4198 4216 : resultRelInfo->ri_MergeActions[action->matchKind] =
4199 2108 : lappend(resultRelInfo->ri_MergeActions[action->matchKind],
4200 : action_state);
4201 :
4202 2108 : switch (action->commandType)
4203 : {
4204 704 : case CMD_INSERT:
4205 : /* INSERT actions always use rootRelInfo */
4206 704 : ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
4207 : action->targetList);
4208 :
4209 : /*
4210 : * If the MERGE targets a partitioned table, any INSERT
4211 : * actions must be routed through it, not the child
4212 : * relations. Initialize the routing struct and the root
4213 : * table's "new" tuple slot for that, if not already done.
4214 : * The projection we prepare, for all relations, uses the
4215 : * root relation descriptor, and targets the plan's root
4216 : * slot. (This is consistent with the fact that we
4217 : * checked the plan output to match the root relation,
4218 : * above.)
4219 : */
4220 704 : if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
4221 : RELKIND_PARTITIONED_TABLE)
4222 : {
4223 216 : if (mtstate->mt_partition_tuple_routing == NULL)
4224 : {
4225 : /*
4226 : * Initialize planstate for routing if not already
4227 : * done.
4228 : *
4229 : * Note that the slot is managed as a standalone
4230 : * slot belonging to ModifyTableState, so we pass
4231 : * NULL for the 2nd argument.
4232 : */
4233 100 : mtstate->mt_root_tuple_slot =
4234 100 : table_slot_create(rootRelInfo->ri_RelationDesc,
4235 : NULL);
4236 100 : mtstate->mt_partition_tuple_routing =
4237 100 : ExecSetupPartitionTupleRouting(estate,
4238 : rootRelInfo->ri_RelationDesc);
4239 : }
4240 216 : tgtslot = mtstate->mt_root_tuple_slot;
4241 216 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
4242 : }
4243 : else
4244 : {
4245 : /*
4246 : * If the MERGE targets an inherited table, we insert
4247 : * into the root table, so we must initialize its
4248 : * "new" tuple slot, if not already done, and use its
4249 : * relation descriptor for the projection.
4250 : *
4251 : * For non-inherited tables, rootRelInfo and
4252 : * resultRelInfo are the same, and the "new" tuple
4253 : * slot will already have been initialized.
4254 : */
4255 488 : if (rootRelInfo->ri_newTupleSlot == NULL)
4256 24 : rootRelInfo->ri_newTupleSlot =
4257 24 : table_slot_create(rootRelInfo->ri_RelationDesc,
4258 : &estate->es_tupleTable);
4259 :
4260 488 : tgtslot = rootRelInfo->ri_newTupleSlot;
4261 488 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
4262 : }
4263 :
4264 704 : action_state->mas_proj =
4265 704 : ExecBuildProjectionInfo(action->targetList, econtext,
4266 : tgtslot,
4267 : &mtstate->ps,
4268 : tgtdesc);
4269 :
4270 704 : mtstate->mt_merge_subcommands |= MERGE_INSERT;
4271 704 : break;
4272 1040 : case CMD_UPDATE:
4273 1040 : action_state->mas_proj =
4274 1040 : ExecBuildUpdateProjection(action->targetList,
4275 : true,
4276 : action->updateColnos,
4277 : relationDesc,
4278 : econtext,
4279 : resultRelInfo->ri_newTupleSlot,
4280 : &mtstate->ps);
4281 1040 : mtstate->mt_merge_subcommands |= MERGE_UPDATE;
4282 1040 : break;
4283 314 : case CMD_DELETE:
4284 314 : mtstate->mt_merge_subcommands |= MERGE_DELETE;
4285 314 : break;
4286 50 : case CMD_NOTHING:
4287 50 : break;
4288 0 : default:
4289 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
4290 : break;
4291 : }
4292 : }
4293 : }
4294 :
4295 : /*
4296 : * If the MERGE targets an inherited table, any INSERT actions will use
4297 : * rootRelInfo, and rootRelInfo will not be in the resultRelInfo array.
4298 : * Therefore we must initialize its WITH CHECK OPTION constraints and
4299 : * RETURNING projection, as ExecInitModifyTable did for the resultRelInfo
4300 : * entries.
4301 : *
4302 : * Note that the planner does not build a withCheckOptionList or
4303 : * returningList for the root relation, but as in ExecInitPartitionInfo,
4304 : * we can use the first resultRelInfo entry as a reference to calculate
4305 : * the attno's for the root table.
4306 : */
4307 1055 : if (rootRelInfo != mtstate->resultRelInfo &&
4308 159 : rootRelInfo->ri_RelationDesc->rd_rel->relkind != RELKIND_PARTITIONED_TABLE &&
4309 32 : (mtstate->mt_merge_subcommands & MERGE_INSERT) != 0)
4310 : {
4311 24 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
4312 24 : Relation rootRelation = rootRelInfo->ri_RelationDesc;
4313 24 : Relation firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
4314 24 : int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
4315 24 : AttrMap *part_attmap = NULL;
4316 : bool found_whole_row;
4317 :
4318 24 : if (node->withCheckOptionLists != NIL)
4319 : {
4320 : List *wcoList;
4321 12 : List *wcoExprs = NIL;
4322 :
4323 : /* There should be as many WCO lists as result rels */
4324 : Assert(list_length(node->withCheckOptionLists) ==
4325 : list_length(node->resultRelations));
4326 :
4327 : /*
4328 : * Use the first WCO list as a reference. In the most common case,
4329 : * this will be for the same relation as rootRelInfo, and so there
4330 : * will be no need to adjust its attno's.
4331 : */
4332 12 : wcoList = linitial(node->withCheckOptionLists);
4333 12 : if (rootRelation != firstResultRel)
4334 : {
4335 : /* Convert any Vars in it to contain the root's attno's */
4336 : part_attmap =
4337 12 : build_attrmap_by_name(RelationGetDescr(rootRelation),
4338 : RelationGetDescr(firstResultRel),
4339 : false);
4340 :
4341 : wcoList = (List *)
4342 12 : map_variable_attnos((Node *) wcoList,
4343 : firstVarno, 0,
4344 : part_attmap,
4345 12 : RelationGetForm(rootRelation)->reltype,
4346 : &found_whole_row);
4347 : }
4348 :
4349 60 : foreach(lc, wcoList)
4350 : {
4351 48 : WithCheckOption *wco = lfirst_node(WithCheckOption, lc);
4352 48 : ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual),
4353 : &mtstate->ps);
4354 :
4355 48 : wcoExprs = lappend(wcoExprs, wcoExpr);
4356 : }
4357 :
4358 12 : rootRelInfo->ri_WithCheckOptions = wcoList;
4359 12 : rootRelInfo->ri_WithCheckOptionExprs = wcoExprs;
4360 : }
4361 :
4362 24 : if (node->returningLists != NIL)
4363 : {
4364 : List *returningList;
4365 :
4366 : /* There should be as many returning lists as result rels */
4367 : Assert(list_length(node->returningLists) ==
4368 : list_length(node->resultRelations));
4369 :
4370 : /*
4371 : * Use the first returning list as a reference. In the most common
4372 : * case, this will be for the same relation as rootRelInfo, and so
4373 : * there will be no need to adjust its attno's.
4374 : */
4375 4 : returningList = linitial(node->returningLists);
4376 4 : if (rootRelation != firstResultRel)
4377 : {
4378 : /* Convert any Vars in it to contain the root's attno's */
4379 4 : if (part_attmap == NULL)
4380 : part_attmap =
4381 0 : build_attrmap_by_name(RelationGetDescr(rootRelation),
4382 : RelationGetDescr(firstResultRel),
4383 : false);
4384 :
4385 : returningList = (List *)
4386 4 : map_variable_attnos((Node *) returningList,
4387 : firstVarno, 0,
4388 : part_attmap,
4389 4 : RelationGetForm(rootRelation)->reltype,
4390 : &found_whole_row);
4391 : }
4392 4 : rootRelInfo->ri_returningList = returningList;
4393 :
4394 : /* Initialize the RETURNING projection */
4395 4 : rootRelInfo->ri_projectReturning =
4396 4 : ExecBuildProjectionInfo(returningList, econtext,
4397 : mtstate->ps.ps_ResultTupleSlot,
4398 : &mtstate->ps,
4399 : RelationGetDescr(rootRelation));
4400 : }
4401 : }
4402 : }
4403 :
4404 : /*
4405 : * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
4406 : *
4407 : * We mark 'projectNewInfoValid' even though the projections themselves
4408 : * are not initialized here.
4409 : */
4410 : void
4411 1226 : ExecInitMergeTupleSlots(ModifyTableState *mtstate,
4412 : ResultRelInfo *resultRelInfo)
4413 : {
4414 1226 : EState *estate = mtstate->ps.state;
4415 :
4416 : Assert(!resultRelInfo->ri_projectNewInfoValid);
4417 :
4418 1226 : resultRelInfo->ri_oldTupleSlot =
4419 1226 : table_slot_create(resultRelInfo->ri_RelationDesc,
4420 : &estate->es_tupleTable);
4421 1226 : resultRelInfo->ri_newTupleSlot =
4422 1226 : table_slot_create(resultRelInfo->ri_RelationDesc,
4423 : &estate->es_tupleTable);
4424 1226 : resultRelInfo->ri_projectNewInfoValid = true;
4425 1226 : }
4426 :
4427 : /*
4428 : * Process BEFORE EACH STATEMENT triggers
4429 : */
4430 : static void
4431 75621 : fireBSTriggers(ModifyTableState *node)
4432 : {
4433 75621 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
4434 75621 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
4435 :
4436 75621 : switch (node->operation)
4437 : {
4438 57437 : case CMD_INSERT:
4439 57437 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
4440 57429 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
4441 595 : ExecBSUpdateTriggers(node->ps.state,
4442 : resultRelInfo);
4443 57429 : break;
4444 8919 : case CMD_UPDATE:
4445 8919 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
4446 8919 : break;
4447 8309 : case CMD_DELETE:
4448 8309 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
4449 8309 : break;
4450 956 : case CMD_MERGE:
4451 956 : if (node->mt_merge_subcommands & MERGE_INSERT)
4452 523 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
4453 956 : if (node->mt_merge_subcommands & MERGE_UPDATE)
4454 629 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
4455 956 : if (node->mt_merge_subcommands & MERGE_DELETE)
4456 258 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
4457 956 : break;
4458 0 : default:
4459 0 : elog(ERROR, "unknown operation");
4460 : break;
4461 : }
4462 75613 : }
4463 :
4464 : /*
4465 : * Process AFTER EACH STATEMENT triggers
4466 : */
4467 : static void
4468 73323 : fireASTriggers(ModifyTableState *node)
4469 : {
4470 73323 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
4471 73323 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
4472 :
4473 73323 : switch (node->operation)
4474 : {
4475 55844 : case CMD_INSERT:
4476 55844 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
4477 523 : ExecASUpdateTriggers(node->ps.state,
4478 : resultRelInfo,
4479 523 : node->mt_oc_transition_capture);
4480 55844 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
4481 55844 : node->mt_transition_capture);
4482 55844 : break;
4483 8414 : case CMD_UPDATE:
4484 8414 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
4485 8414 : node->mt_transition_capture);
4486 8414 : break;
4487 8210 : case CMD_DELETE:
4488 8210 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
4489 8210 : node->mt_transition_capture);
4490 8210 : break;
4491 855 : case CMD_MERGE:
4492 855 : if (node->mt_merge_subcommands & MERGE_DELETE)
4493 234 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
4494 234 : node->mt_transition_capture);
4495 855 : if (node->mt_merge_subcommands & MERGE_UPDATE)
4496 564 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
4497 564 : node->mt_transition_capture);
4498 855 : if (node->mt_merge_subcommands & MERGE_INSERT)
4499 478 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
4500 478 : node->mt_transition_capture);
4501 855 : break;
4502 0 : default:
4503 0 : elog(ERROR, "unknown operation");
4504 : break;
4505 : }
4506 73323 : }
4507 :
4508 : /*
4509 : * Set up the state needed for collecting transition tuples for AFTER
4510 : * triggers.
4511 : */
4512 : static void
4513 75875 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
4514 : {
4515 75875 : ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
4516 75875 : ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
4517 :
4518 : /* Check for transition tables on the directly targeted relation. */
4519 75875 : mtstate->mt_transition_capture =
4520 75875 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
4521 75875 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
4522 : mtstate->operation);
4523 75875 : if (plan->operation == CMD_INSERT &&
4524 56398 : plan->onConflictAction == ONCONFLICT_UPDATE)
4525 599 : mtstate->mt_oc_transition_capture =
4526 599 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
4527 599 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
4528 : CMD_UPDATE);
4529 75875 : }
4530 :
4531 : /*
4532 : * ExecPrepareTupleRouting --- prepare for routing one tuple
4533 : *
4534 : * Determine the partition in which the tuple in slot is to be inserted,
4535 : * and return its ResultRelInfo in *partRelInfo. The return value is
4536 : * a slot holding the tuple of the partition rowtype.
4537 : *
4538 : * This also sets the transition table information in mtstate based on the
4539 : * selected partition.
4540 : */
4541 : static TupleTableSlot *
4542 480974 : ExecPrepareTupleRouting(ModifyTableState *mtstate,
4543 : EState *estate,
4544 : PartitionTupleRouting *proute,
4545 : ResultRelInfo *targetRelInfo,
4546 : TupleTableSlot *slot,
4547 : ResultRelInfo **partRelInfo)
4548 : {
4549 : ResultRelInfo *partrel;
4550 : TupleConversionMap *map;
4551 :
4552 : /*
4553 : * Lookup the target partition's ResultRelInfo. If ExecFindPartition does
4554 : * not find a valid partition for the tuple in 'slot' then an error is
4555 : * raised. An error may also be raised if the found partition is not a
4556 : * valid target for INSERTs. This is required since a partitioned table
4557 : * UPDATE to another partition becomes a DELETE+INSERT.
4558 : */
4559 480974 : partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
4560 :
4561 : /*
4562 : * If we're capturing transition tuples, we might need to convert from the
4563 : * partition rowtype to root partitioned table's rowtype. But if there
4564 : * are no BEFORE triggers on the partition that could change the tuple, we
4565 : * can just remember the original unconverted tuple to avoid a needless
4566 : * round trip conversion.
4567 : */
4568 480830 : if (mtstate->mt_transition_capture != NULL)
4569 : {
4570 : bool has_before_insert_row_trig;
4571 :
4572 130 : has_before_insert_row_trig = (partrel->ri_TrigDesc &&
4573 28 : partrel->ri_TrigDesc->trig_insert_before_row);
4574 :
4575 102 : mtstate->mt_transition_capture->tcs_original_insert_tuple =
4576 102 : !has_before_insert_row_trig ? slot : NULL;
4577 : }
4578 :
4579 : /*
4580 : * Convert the tuple, if necessary.
4581 : */
4582 480830 : map = ExecGetRootToChildMap(partrel, estate);
4583 480830 : if (map != NULL)
4584 : {
4585 45760 : TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
4586 :
4587 45760 : slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
4588 : }
4589 :
4590 480830 : *partRelInfo = partrel;
4591 480830 : return slot;
4592 : }
4593 :
4594 : /* ----------------------------------------------------------------
4595 : * ExecModifyTable
4596 : *
4597 : * Perform table modifications as required, and return RETURNING results
4598 : * if needed.
4599 : * ----------------------------------------------------------------
4600 : */
4601 : static TupleTableSlot *
4602 80684 : ExecModifyTable(PlanState *pstate)
4603 : {
4604 80684 : ModifyTableState *node = castNode(ModifyTableState, pstate);
4605 : ModifyTableContext context;
4606 80684 : EState *estate = node->ps.state;
4607 80684 : CmdType operation = node->operation;
4608 : ResultRelInfo *resultRelInfo;
4609 : PlanState *subplanstate;
4610 : TupleTableSlot *slot;
4611 : TupleTableSlot *oldSlot;
4612 : ItemPointerData tuple_ctid;
4613 : HeapTupleData oldtupdata;
4614 : HeapTuple oldtuple;
4615 : ItemPointer tupleid;
4616 : bool tuplock;
4617 :
4618 80684 : CHECK_FOR_INTERRUPTS();
4619 :
4620 : /*
4621 : * This should NOT get called during EvalPlanQual; we should have passed a
4622 : * subplan tree to EvalPlanQual, instead. Use a runtime test not just
4623 : * Assert because this condition is easy to miss in testing. (Note:
4624 : * although ModifyTable should not get executed within an EvalPlanQual
4625 : * operation, we do have to allow it to be initialized and shut down in
4626 : * case it is within a CTE subplan. Hence this test must be here, not in
4627 : * ExecInitModifyTable.)
4628 : */
4629 80684 : if (estate->es_epq_active != NULL)
4630 0 : elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
4631 :
4632 : /*
4633 : * If we've already completed processing, don't try to do more. We need
4634 : * this test because ExecPostprocessPlan might call us an extra time, and
4635 : * our subplan's nodes aren't necessarily robust against being called
4636 : * extra times.
4637 : */
4638 80684 : if (node->mt_done)
4639 536 : return NULL;
4640 :
4641 : /*
4642 : * On first call, fire BEFORE STATEMENT triggers before proceeding.
4643 : */
4644 80148 : if (node->fireBSTriggers)
4645 : {
4646 74577 : fireBSTriggers(node);
4647 74569 : node->fireBSTriggers = false;
4648 : }
4649 :
4650 : /* Preload local variables */
4651 80140 : resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
4652 80140 : subplanstate = outerPlanState(node);
4653 :
4654 : /* Set global context */
4655 80140 : context.mtstate = node;
4656 80140 : context.epqstate = &node->mt_epqstate;
4657 80140 : context.estate = estate;
4658 :
4659 : /*
4660 : * Fetch rows from subplan, and execute the required table modification
4661 : * for each row.
4662 : */
4663 : for (;;)
4664 : {
4665 : /*
4666 : * Reset the per-output-tuple exprcontext. This is needed because
4667 : * triggers expect to use that context as workspace. It's a bit ugly
4668 : * to do this below the top level of the plan, however. We might need
4669 : * to rethink this later.
4670 : */
4671 11051016 : ResetPerTupleExprContext(estate);
4672 :
4673 : /*
4674 : * Reset per-tuple memory context used for processing on conflict and
4675 : * returning clauses, to free any expression evaluation storage
4676 : * allocated in the previous cycle.
4677 : */
4678 11051016 : if (pstate->ps_ExprContext)
4679 2235168 : ResetExprContext(pstate->ps_ExprContext);
4680 :
4681 : /*
4682 : * If there is a pending MERGE ... WHEN NOT MATCHED [BY TARGET] action
4683 : * to execute, do so now --- see the comments in ExecMerge().
4684 : */
4685 11051016 : if (node->mt_merge_pending_not_matched != NULL)
4686 : {
4687 2 : context.planSlot = node->mt_merge_pending_not_matched;
4688 2 : context.cpDeletedSlot = NULL;
4689 :
4690 2 : slot = ExecMergeNotMatched(&context, node->resultRelInfo,
4691 2 : node->canSetTag);
4692 :
4693 : /* Clear the pending action */
4694 2 : node->mt_merge_pending_not_matched = NULL;
4695 :
4696 : /*
4697 : * If we got a RETURNING result, return it to the caller. We'll
4698 : * continue the work on next call.
4699 : */
4700 2 : if (slot)
4701 2 : return slot;
4702 :
4703 0 : continue; /* continue with the next tuple */
4704 : }
4705 :
4706 : /* Fetch the next row from subplan */
4707 11051014 : context.planSlot = ExecProcNode(subplanstate);
4708 11050719 : context.cpDeletedSlot = NULL;
4709 :
4710 : /* No more tuples to process? */
4711 11050719 : if (TupIsNull(context.planSlot))
4712 : break;
4713 :
4714 : /*
4715 : * When there are multiple result relations, each tuple contains a
4716 : * junk column that gives the OID of the rel from which it came.
4717 : * Extract it and select the correct result relation.
4718 : */
4719 10978423 : if (AttributeNumberIsValid(node->mt_resultOidAttno))
4720 : {
4721 : Datum datum;
4722 : bool isNull;
4723 : Oid resultoid;
4724 :
4725 3417 : datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
4726 : &isNull);
4727 3417 : if (isNull)
4728 : {
4729 : /*
4730 : * For commands other than MERGE, any tuples having InvalidOid
4731 : * for tableoid are errors. For MERGE, we may need to handle
4732 : * them as WHEN NOT MATCHED clauses if any, so do that.
4733 : *
4734 : * Note that we use the node's toplevel resultRelInfo, not any
4735 : * specific partition's.
4736 : */
4737 338 : if (operation == CMD_MERGE)
4738 : {
4739 338 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4740 :
4741 338 : slot = ExecMerge(&context, node->resultRelInfo,
4742 338 : NULL, NULL, node->canSetTag);
4743 :
4744 : /*
4745 : * If we got a RETURNING result, return it to the caller.
4746 : * We'll continue the work on next call.
4747 : */
4748 330 : if (slot)
4749 25 : return slot;
4750 :
4751 305 : continue; /* continue with the next tuple */
4752 : }
4753 :
4754 0 : elog(ERROR, "tableoid is NULL");
4755 : }
4756 3079 : resultoid = DatumGetObjectId(datum);
4757 :
4758 : /* If it's not the same as last time, we need to locate the rel */
4759 3079 : if (resultoid != node->mt_lastResultOid)
4760 2127 : resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
4761 : false, true);
4762 : }
4763 :
4764 : /*
4765 : * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
4766 : * here is compute the RETURNING expressions.
4767 : */
4768 10978085 : if (resultRelInfo->ri_usesFdwDirectModify)
4769 : {
4770 : Assert(resultRelInfo->ri_projectReturning);
4771 :
4772 : /*
4773 : * A scan slot containing the data that was actually inserted,
4774 : * updated or deleted has already been made available to
4775 : * ExecProcessReturning by IterateDirectModify, so no need to
4776 : * provide it here. The individual old and new slots are not
4777 : * needed, since direct-modify is disabled if the RETURNING list
4778 : * refers to OLD/NEW values.
4779 : */
4780 : Assert((resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD) == 0 &&
4781 : (resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW) == 0);
4782 :
4783 347 : slot = ExecProcessReturning(&context, resultRelInfo,
4784 : operation == CMD_DELETE,
4785 : NULL, NULL, context.planSlot);
4786 :
4787 347 : return slot;
4788 : }
4789 :
4790 10977738 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4791 10977738 : slot = context.planSlot;
4792 :
4793 10977738 : tupleid = NULL;
4794 10977738 : oldtuple = NULL;
4795 :
4796 : /*
4797 : * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
4798 : * to be updated/deleted/merged. For a heap relation, that's a TID;
4799 : * otherwise we may have a wholerow junk attr that carries the old
4800 : * tuple in toto. Keep this in step with the part of
4801 : * ExecInitModifyTable that sets up ri_RowIdAttNo.
4802 : */
4803 10977738 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4804 : operation == CMD_MERGE)
4805 : {
4806 : char relkind;
4807 : Datum datum;
4808 : bool isNull;
4809 :
4810 3242384 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4811 3242384 : if (relkind == RELKIND_RELATION ||
4812 338 : relkind == RELKIND_MATVIEW ||
4813 : relkind == RELKIND_PARTITIONED_TABLE)
4814 : {
4815 : /*
4816 : * ri_RowIdAttNo refers to a ctid attribute. See the comment
4817 : * in ExecInitModifyTable().
4818 : */
4819 : Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo) ||
4820 : relkind == RELKIND_PARTITIONED_TABLE);
4821 3242050 : datum = ExecGetJunkAttribute(slot,
4822 3242050 : resultRelInfo->ri_RowIdAttNo,
4823 : &isNull);
4824 :
4825 : /*
4826 : * For commands other than MERGE, any tuples having a null row
4827 : * identifier are errors. For MERGE, we may need to handle
4828 : * them as WHEN NOT MATCHED clauses if any, so do that.
4829 : *
4830 : * Note that we use the node's toplevel resultRelInfo, not any
4831 : * specific partition's.
4832 : */
4833 3242050 : if (isNull)
4834 : {
4835 1421 : if (operation == CMD_MERGE)
4836 : {
4837 1421 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4838 :
4839 1421 : slot = ExecMerge(&context, node->resultRelInfo,
4840 1421 : NULL, NULL, node->canSetTag);
4841 :
4842 : /*
4843 : * If we got a RETURNING result, return it to the
4844 : * caller. We'll continue the work on next call.
4845 : */
4846 1394 : if (slot)
4847 88 : return slot;
4848 :
4849 1334 : continue; /* continue with the next tuple */
4850 : }
4851 :
4852 0 : elog(ERROR, "ctid is NULL");
4853 : }
4854 :
4855 3240629 : tupleid = (ItemPointer) DatumGetPointer(datum);
4856 3240629 : tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
4857 3240629 : tupleid = &tuple_ctid;
4858 : }
4859 :
4860 : /*
4861 : * Use the wholerow attribute, when available, to reconstruct the
4862 : * old relation tuple. The old tuple serves one or both of two
4863 : * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
4864 : * provides values for any unchanged columns for the NEW tuple of
4865 : * an UPDATE, because the subplan does not produce all the columns
4866 : * of the target table.
4867 : *
4868 : * Note that the wholerow attribute does not carry system columns,
4869 : * so foreign table triggers miss seeing those, except that we
4870 : * know enough here to set t_tableOid. Quite separately from
4871 : * this, the FDW may fetch its own junk attrs to identify the row.
4872 : *
4873 : * Other relevant relkinds, currently limited to views, always
4874 : * have a wholerow attribute.
4875 : */
4876 334 : else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4877 : {
4878 319 : datum = ExecGetJunkAttribute(slot,
4879 319 : resultRelInfo->ri_RowIdAttNo,
4880 : &isNull);
4881 :
4882 : /*
4883 : * For commands other than MERGE, any tuples having a null row
4884 : * identifier are errors. For MERGE, we may need to handle
4885 : * them as WHEN NOT MATCHED clauses if any, so do that.
4886 : *
4887 : * Note that we use the node's toplevel resultRelInfo, not any
4888 : * specific partition's.
4889 : */
4890 319 : if (isNull)
4891 : {
4892 32 : if (operation == CMD_MERGE)
4893 : {
4894 32 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4895 :
4896 32 : slot = ExecMerge(&context, node->resultRelInfo,
4897 32 : NULL, NULL, node->canSetTag);
4898 :
4899 : /*
4900 : * If we got a RETURNING result, return it to the
4901 : * caller. We'll continue the work on next call.
4902 : */
4903 28 : if (slot)
4904 8 : return slot;
4905 :
4906 20 : continue; /* continue with the next tuple */
4907 : }
4908 :
4909 0 : elog(ERROR, "wholerow is NULL");
4910 : }
4911 :
4912 287 : oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
4913 287 : oldtupdata.t_len =
4914 287 : HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
4915 287 : ItemPointerSetInvalid(&(oldtupdata.t_self));
4916 : /* Historically, view triggers see invalid t_tableOid. */
4917 287 : oldtupdata.t_tableOid =
4918 287 : (relkind == RELKIND_VIEW) ? InvalidOid :
4919 105 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
4920 :
4921 287 : oldtuple = &oldtupdata;
4922 : }
4923 : else
4924 : {
4925 : /* Only foreign tables are allowed to omit a row-ID attr */
4926 : Assert(relkind == RELKIND_FOREIGN_TABLE);
4927 : }
4928 : }
4929 :
4930 10976285 : switch (operation)
4931 : {
4932 7735354 : case CMD_INSERT:
4933 : /* Initialize projection info if first time for this table */
4934 7735354 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4935 55638 : ExecInitInsertProjection(node, resultRelInfo);
4936 7735354 : slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
4937 7735354 : slot = ExecInsert(&context, resultRelInfo, slot,
4938 7735354 : node->canSetTag, NULL, NULL);
4939 7733918 : break;
4940 :
4941 2211406 : case CMD_UPDATE:
4942 2211406 : tuplock = false;
4943 :
4944 : /* Initialize projection info if first time for this table */
4945 2211406 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4946 8669 : ExecInitUpdateProjection(node, resultRelInfo);
4947 :
4948 : /*
4949 : * Make the new tuple by combining plan's output tuple with
4950 : * the old tuple being updated.
4951 : */
4952 2211406 : oldSlot = resultRelInfo->ri_oldTupleSlot;
4953 2211406 : if (oldtuple != NULL)
4954 : {
4955 : Assert(!resultRelInfo->ri_needLockTagTuple);
4956 : /* Use the wholerow junk attr as the old tuple. */
4957 179 : ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
4958 : }
4959 : else
4960 : {
4961 : /* Fetch the most recent version of old tuple. */
4962 2211227 : Relation relation = resultRelInfo->ri_RelationDesc;
4963 :
4964 2211227 : if (resultRelInfo->ri_needLockTagTuple)
4965 : {
4966 15889 : LockTuple(relation, tupleid, InplaceUpdateTupleLock);
4967 15889 : tuplock = true;
4968 : }
4969 2211227 : if (!table_tuple_fetch_row_version(relation, tupleid,
4970 : SnapshotAny,
4971 : oldSlot))
4972 0 : elog(ERROR, "failed to fetch tuple being updated");
4973 : }
4974 2211406 : slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
4975 : oldSlot);
4976 :
4977 : /* Now apply the update. */
4978 2211406 : slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
4979 2211406 : oldSlot, slot, node->canSetTag);
4980 2211043 : if (tuplock)
4981 15889 : UnlockTuple(resultRelInfo->ri_RelationDesc, tupleid,
4982 : InplaceUpdateTupleLock);
4983 2211043 : break;
4984 :
4985 1021056 : case CMD_DELETE:
4986 1021056 : slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
4987 1021056 : true, false, node->canSetTag, NULL, NULL, NULL);
4988 1020985 : break;
4989 :
4990 8469 : case CMD_MERGE:
4991 8469 : slot = ExecMerge(&context, resultRelInfo, tupleid, oldtuple,
4992 8469 : node->canSetTag);
4993 8407 : break;
4994 :
4995 0 : default:
4996 0 : elog(ERROR, "unknown operation");
4997 : break;
4998 : }
4999 :
5000 : /*
5001 : * If we got a RETURNING result, return it to caller. We'll continue
5002 : * the work on next call.
5003 : */
5004 10974353 : if (slot)
5005 5116 : return slot;
5006 : }
5007 :
5008 : /*
5009 : * Insert remaining tuples for batch insert.
5010 : */
5011 72296 : if (estate->es_insert_pending_result_relations != NIL)
5012 13 : ExecPendingInserts(estate);
5013 :
5014 : /*
5015 : * We're done, but fire AFTER STATEMENT triggers before exiting.
5016 : */
5017 72295 : fireASTriggers(node);
5018 :
5019 72295 : node->mt_done = true;
5020 :
5021 72295 : return NULL;
5022 : }
5023 :
5024 : /*
5025 : * ExecLookupResultRelByOid
5026 : * If the table with given OID is among the result relations to be
5027 : * updated by the given ModifyTable node, return its ResultRelInfo.
5028 : *
5029 : * If not found, return NULL if missing_ok, else raise error.
5030 : *
5031 : * If update_cache is true, then upon successful lookup, update the node's
5032 : * one-element cache. ONLY ExecModifyTable may pass true for this.
5033 : */
5034 : ResultRelInfo *
5035 8290 : ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid,
5036 : bool missing_ok, bool update_cache)
5037 : {
5038 8290 : if (node->mt_resultOidHash)
5039 : {
5040 : /* Use the pre-built hash table to locate the rel */
5041 : MTTargetRelLookup *mtlookup;
5042 :
5043 : mtlookup = (MTTargetRelLookup *)
5044 0 : hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
5045 0 : if (mtlookup)
5046 : {
5047 0 : if (update_cache)
5048 : {
5049 0 : node->mt_lastResultOid = resultoid;
5050 0 : node->mt_lastResultIndex = mtlookup->relationIndex;
5051 : }
5052 0 : return node->resultRelInfo + mtlookup->relationIndex;
5053 : }
5054 : }
5055 : else
5056 : {
5057 : /* With few target rels, just search the ResultRelInfo array */
5058 15732 : for (int ndx = 0; ndx < node->mt_nrels; ndx++)
5059 : {
5060 9973 : ResultRelInfo *rInfo = node->resultRelInfo + ndx;
5061 :
5062 9973 : if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
5063 : {
5064 2531 : if (update_cache)
5065 : {
5066 2127 : node->mt_lastResultOid = resultoid;
5067 2127 : node->mt_lastResultIndex = ndx;
5068 : }
5069 2531 : return rInfo;
5070 : }
5071 : }
5072 : }
5073 :
5074 5759 : if (!missing_ok)
5075 0 : elog(ERROR, "incorrect result relation OID %u", resultoid);
5076 5759 : return NULL;
5077 : }
5078 :
5079 : /* ----------------------------------------------------------------
5080 : * ExecInitModifyTable
5081 : * ----------------------------------------------------------------
5082 : */
5083 : ModifyTableState *
5084 75494 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
5085 : {
5086 : ModifyTableState *mtstate;
5087 75494 : Plan *subplan = outerPlan(node);
5088 75494 : CmdType operation = node->operation;
5089 75494 : int total_nrels = list_length(node->resultRelations);
5090 : int nrels;
5091 75494 : List *resultRelations = NIL;
5092 75494 : List *withCheckOptionLists = NIL;
5093 75494 : List *returningLists = NIL;
5094 75494 : List *updateColnosLists = NIL;
5095 75494 : List *mergeActionLists = NIL;
5096 75494 : List *mergeJoinConditions = NIL;
5097 : ResultRelInfo *resultRelInfo;
5098 : List *arowmarks;
5099 : ListCell *l;
5100 : int i;
5101 : Relation rel;
5102 :
5103 : /* check for unsupported flags */
5104 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
5105 :
5106 : /*
5107 : * Only consider unpruned relations for initializing their ResultRelInfo
5108 : * struct and other fields such as withCheckOptions, etc.
5109 : *
5110 : * Note: We must avoid pruning every result relation. This is important
5111 : * for MERGE, since even if every result relation is pruned from the
5112 : * subplan, there might still be NOT MATCHED rows, for which there may be
5113 : * INSERT actions to perform. To allow these actions to be found, at
5114 : * least one result relation must be kept. Also, when inserting into a
5115 : * partitioned table, ExecInitPartitionInfo() needs a ResultRelInfo struct
5116 : * as a reference for building the ResultRelInfo of the target partition.
5117 : * In either case, it doesn't matter which result relation is kept, so we
5118 : * just keep the first one, if all others have been pruned. See also,
5119 : * ExecDoInitialPruning(), which ensures that this first result relation
5120 : * has been locked.
5121 : */
5122 75494 : i = 0;
5123 152651 : foreach(l, node->resultRelations)
5124 : {
5125 77157 : Index rti = lfirst_int(l);
5126 : bool keep_rel;
5127 :
5128 77157 : keep_rel = bms_is_member(rti, estate->es_unpruned_relids);
5129 77157 : if (!keep_rel && i == total_nrels - 1 && resultRelations == NIL)
5130 : {
5131 : /* all result relations pruned; keep the first one */
5132 32 : keep_rel = true;
5133 32 : rti = linitial_int(node->resultRelations);
5134 32 : i = 0;
5135 : }
5136 :
5137 77157 : if (keep_rel)
5138 : {
5139 77100 : resultRelations = lappend_int(resultRelations, rti);
5140 77100 : if (node->withCheckOptionLists)
5141 : {
5142 1052 : List *withCheckOptions = list_nth_node(List,
5143 : node->withCheckOptionLists,
5144 : i);
5145 :
5146 1052 : withCheckOptionLists = lappend(withCheckOptionLists, withCheckOptions);
5147 : }
5148 77100 : if (node->returningLists)
5149 : {
5150 3794 : List *returningList = list_nth_node(List,
5151 : node->returningLists,
5152 : i);
5153 :
5154 3794 : returningLists = lappend(returningLists, returningList);
5155 : }
5156 77100 : if (node->updateColnosLists)
5157 : {
5158 10511 : List *updateColnosList = list_nth(node->updateColnosLists, i);
5159 :
5160 10511 : updateColnosLists = lappend(updateColnosLists, updateColnosList);
5161 : }
5162 77100 : if (node->mergeActionLists)
5163 : {
5164 1219 : List *mergeActionList = list_nth(node->mergeActionLists, i);
5165 :
5166 1219 : mergeActionLists = lappend(mergeActionLists, mergeActionList);
5167 : }
5168 77100 : if (node->mergeJoinConditions)
5169 : {
5170 1219 : List *mergeJoinCondition = list_nth(node->mergeJoinConditions, i);
5171 :
5172 1219 : mergeJoinConditions = lappend(mergeJoinConditions, mergeJoinCondition);
5173 : }
5174 : }
5175 77157 : i++;
5176 : }
5177 75494 : nrels = list_length(resultRelations);
5178 : Assert(nrels > 0);
5179 :
5180 : /*
5181 : * create state structure
5182 : */
5183 75494 : mtstate = makeNode(ModifyTableState);
5184 75494 : mtstate->ps.plan = (Plan *) node;
5185 75494 : mtstate->ps.state = estate;
5186 75494 : mtstate->ps.ExecProcNode = ExecModifyTable;
5187 :
5188 75494 : mtstate->operation = operation;
5189 75494 : mtstate->canSetTag = node->canSetTag;
5190 75494 : mtstate->mt_done = false;
5191 :
5192 75494 : mtstate->mt_nrels = nrels;
5193 75494 : mtstate->resultRelInfo = palloc_array(ResultRelInfo, nrels);
5194 :
5195 75494 : mtstate->mt_merge_pending_not_matched = NULL;
5196 75494 : mtstate->mt_merge_inserted = 0;
5197 75494 : mtstate->mt_merge_updated = 0;
5198 75494 : mtstate->mt_merge_deleted = 0;
5199 75494 : mtstate->mt_updateColnosLists = updateColnosLists;
5200 75494 : mtstate->mt_mergeActionLists = mergeActionLists;
5201 75494 : mtstate->mt_mergeJoinConditions = mergeJoinConditions;
5202 :
5203 : /*----------
5204 : * Resolve the target relation. This is the same as:
5205 : *
5206 : * - the relation for which we will fire FOR STATEMENT triggers,
5207 : * - the relation into whose tuple format all captured transition tuples
5208 : * must be converted, and
5209 : * - the root partitioned table used for tuple routing.
5210 : *
5211 : * If it's a partitioned or inherited table, the root partition or
5212 : * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
5213 : * given explicitly in node->rootRelation. Otherwise, the target relation
5214 : * is the sole relation in the node->resultRelations list and, since it can
5215 : * never be pruned, also in the resultRelations list constructed above.
5216 : *----------
5217 : */
5218 75494 : if (node->rootRelation > 0)
5219 : {
5220 : Assert(bms_is_member(node->rootRelation, estate->es_unpruned_relids));
5221 1919 : mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
5222 1919 : ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
5223 : node->rootRelation);
5224 : }
5225 : else
5226 : {
5227 : Assert(list_length(node->resultRelations) == 1);
5228 : Assert(list_length(resultRelations) == 1);
5229 73575 : mtstate->rootResultRelInfo = mtstate->resultRelInfo;
5230 73575 : ExecInitResultRelation(estate, mtstate->resultRelInfo,
5231 73575 : linitial_int(resultRelations));
5232 : }
5233 :
5234 : /* set up epqstate with dummy subplan data for the moment */
5235 75494 : EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
5236 : node->epqParam, resultRelations);
5237 75494 : mtstate->fireBSTriggers = true;
5238 :
5239 : /*
5240 : * Build state for collecting transition tuples. This requires having a
5241 : * valid trigger query context, so skip it in explain-only mode.
5242 : */
5243 75494 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
5244 74831 : ExecSetupTransitionCaptureState(mtstate, estate);
5245 :
5246 : /*
5247 : * Open all the result relations and initialize the ResultRelInfo structs.
5248 : * (But root relation was initialized above, if it's part of the array.)
5249 : * We must do this before initializing the subplan, because direct-modify
5250 : * FDWs expect their ResultRelInfos to be available.
5251 : */
5252 75494 : resultRelInfo = mtstate->resultRelInfo;
5253 75494 : i = 0;
5254 152366 : foreach(l, resultRelations)
5255 : {
5256 77096 : Index resultRelation = lfirst_int(l);
5257 77096 : List *mergeActions = NIL;
5258 :
5259 77096 : if (mergeActionLists)
5260 1219 : mergeActions = list_nth(mergeActionLists, i);
5261 :
5262 77096 : if (resultRelInfo != mtstate->rootResultRelInfo)
5263 : {
5264 3521 : ExecInitResultRelation(estate, resultRelInfo, resultRelation);
5265 :
5266 : /*
5267 : * For child result relations, store the root result relation
5268 : * pointer. We do so for the convenience of places that want to
5269 : * look at the query's original target relation but don't have the
5270 : * mtstate handy.
5271 : */
5272 3521 : resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
5273 : }
5274 :
5275 : /* Initialize the usesFdwDirectModify flag */
5276 77096 : resultRelInfo->ri_usesFdwDirectModify =
5277 77096 : bms_is_member(i, node->fdwDirectModifyPlans);
5278 :
5279 : /*
5280 : * Verify result relation is a valid target for the current operation
5281 : */
5282 77096 : CheckValidResultRel(resultRelInfo, operation, node->onConflictAction,
5283 : mergeActions);
5284 :
5285 76872 : resultRelInfo++;
5286 76872 : i++;
5287 : }
5288 :
5289 : /*
5290 : * Now we may initialize the subplan.
5291 : */
5292 75270 : outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
5293 :
5294 : /*
5295 : * Do additional per-result-relation initialization.
5296 : */
5297 152120 : for (i = 0; i < nrels; i++)
5298 : {
5299 76850 : resultRelInfo = &mtstate->resultRelInfo[i];
5300 :
5301 : /* Let FDWs init themselves for foreign-table result rels */
5302 76850 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5303 76746 : resultRelInfo->ri_FdwRoutine != NULL &&
5304 170 : resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
5305 : {
5306 170 : List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
5307 :
5308 170 : resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
5309 : resultRelInfo,
5310 : fdw_private,
5311 : i,
5312 : eflags);
5313 : }
5314 :
5315 : /*
5316 : * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
5317 : * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
5318 : * tables, the FDW might have created additional junk attr(s), but
5319 : * those are no concern of ours.
5320 : */
5321 76850 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
5322 : operation == CMD_MERGE)
5323 : {
5324 : char relkind;
5325 :
5326 20271 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
5327 20271 : if (relkind == RELKIND_RELATION ||
5328 406 : relkind == RELKIND_MATVIEW ||
5329 : relkind == RELKIND_PARTITIONED_TABLE)
5330 : {
5331 19895 : resultRelInfo->ri_RowIdAttNo =
5332 19895 : ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
5333 :
5334 : /*
5335 : * For heap relations, a ctid junk attribute must be present.
5336 : * Partitioned tables should only appear here when all leaf
5337 : * partitions were pruned, in which case no rows can be
5338 : * produced and ctid is not needed.
5339 : */
5340 19895 : if (relkind == RELKIND_PARTITIONED_TABLE)
5341 : Assert(nrels == 1);
5342 19865 : else if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
5343 0 : elog(ERROR, "could not find junk ctid column");
5344 : }
5345 376 : else if (relkind == RELKIND_FOREIGN_TABLE)
5346 : {
5347 : /*
5348 : * We don't support MERGE with foreign tables for now. (It's
5349 : * problematic because the implementation uses CTID.)
5350 : */
5351 : Assert(operation != CMD_MERGE);
5352 :
5353 : /*
5354 : * When there is a row-level trigger, there should be a
5355 : * wholerow attribute. We also require it to be present in
5356 : * UPDATE and MERGE, so we can get the values of unchanged
5357 : * columns.
5358 : */
5359 186 : resultRelInfo->ri_RowIdAttNo =
5360 186 : ExecFindJunkAttributeInTlist(subplan->targetlist,
5361 : "wholerow");
5362 186 : if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
5363 105 : !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
5364 0 : elog(ERROR, "could not find junk wholerow column");
5365 : }
5366 : else
5367 : {
5368 : /* Other valid target relkinds must provide wholerow */
5369 190 : resultRelInfo->ri_RowIdAttNo =
5370 190 : ExecFindJunkAttributeInTlist(subplan->targetlist,
5371 : "wholerow");
5372 190 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
5373 0 : elog(ERROR, "could not find junk wholerow column");
5374 : }
5375 : }
5376 : }
5377 :
5378 : /*
5379 : * If this is an inherited update/delete/merge, there will be a junk
5380 : * attribute named "tableoid" present in the subplan's targetlist. It
5381 : * will be used to identify the result relation for a given tuple to be
5382 : * updated/deleted/merged.
5383 : */
5384 75270 : mtstate->mt_resultOidAttno =
5385 75270 : ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
5386 : Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || total_nrels == 1);
5387 75270 : mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
5388 75270 : mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
5389 :
5390 : /* Get the root target relation */
5391 75270 : rel = mtstate->rootResultRelInfo->ri_RelationDesc;
5392 :
5393 : /*
5394 : * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
5395 : * or MERGE might need this too, but only if it actually moves tuples
5396 : * between partitions; in that case setup is done by
5397 : * ExecCrossPartitionUpdate.
5398 : */
5399 75270 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
5400 : operation == CMD_INSERT)
5401 3910 : mtstate->mt_partition_tuple_routing =
5402 3910 : ExecSetupPartitionTupleRouting(estate, rel);
5403 :
5404 : /*
5405 : * Initialize any WITH CHECK OPTION constraints if needed.
5406 : */
5407 75270 : resultRelInfo = mtstate->resultRelInfo;
5408 76322 : foreach(l, withCheckOptionLists)
5409 : {
5410 1052 : List *wcoList = (List *) lfirst(l);
5411 1052 : List *wcoExprs = NIL;
5412 : ListCell *ll;
5413 :
5414 3115 : foreach(ll, wcoList)
5415 : {
5416 2063 : WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
5417 2063 : ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
5418 : &mtstate->ps);
5419 :
5420 2063 : wcoExprs = lappend(wcoExprs, wcoExpr);
5421 : }
5422 :
5423 1052 : resultRelInfo->ri_WithCheckOptions = wcoList;
5424 1052 : resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
5425 1052 : resultRelInfo++;
5426 : }
5427 :
5428 : /*
5429 : * Initialize RETURNING projections if needed.
5430 : */
5431 75270 : if (returningLists)
5432 : {
5433 : TupleTableSlot *slot;
5434 : ExprContext *econtext;
5435 :
5436 : /*
5437 : * Initialize result tuple slot and assign its rowtype using the plan
5438 : * node's declared targetlist, which the planner set up to be the same
5439 : * as the first (before runtime pruning) RETURNING list. We assume
5440 : * all the result rels will produce compatible output.
5441 : */
5442 3579 : ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
5443 3579 : slot = mtstate->ps.ps_ResultTupleSlot;
5444 :
5445 : /* Need an econtext too */
5446 3579 : if (mtstate->ps.ps_ExprContext == NULL)
5447 3579 : ExecAssignExprContext(estate, &mtstate->ps);
5448 3579 : econtext = mtstate->ps.ps_ExprContext;
5449 :
5450 : /*
5451 : * Build a projection for each result rel.
5452 : */
5453 3579 : resultRelInfo = mtstate->resultRelInfo;
5454 7373 : foreach(l, returningLists)
5455 : {
5456 3794 : List *rlist = (List *) lfirst(l);
5457 :
5458 3794 : resultRelInfo->ri_returningList = rlist;
5459 3794 : resultRelInfo->ri_projectReturning =
5460 3794 : ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
5461 3794 : resultRelInfo->ri_RelationDesc->rd_att);
5462 3794 : resultRelInfo++;
5463 : }
5464 : }
5465 : else
5466 : {
5467 : /*
5468 : * We still must construct a dummy result tuple type, because InitPlan
5469 : * expects one (maybe should change that?).
5470 : */
5471 71691 : ExecInitResultTypeTL(&mtstate->ps);
5472 :
5473 71691 : mtstate->ps.ps_ExprContext = NULL;
5474 : }
5475 :
5476 : /* Set the list of arbiter indexes if needed for ON CONFLICT */
5477 75270 : resultRelInfo = mtstate->resultRelInfo;
5478 75270 : if (node->onConflictAction != ONCONFLICT_NONE)
5479 : {
5480 : /* insert may only have one relation, inheritance is not expanded */
5481 : Assert(total_nrels == 1);
5482 1171 : resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
5483 : }
5484 :
5485 : /*
5486 : * For ON CONFLICT DO SELECT/UPDATE, initialize the ON CONFLICT action
5487 : * state.
5488 : */
5489 75270 : if (node->onConflictAction == ONCONFLICT_UPDATE ||
5490 74623 : node->onConflictAction == ONCONFLICT_SELECT)
5491 : {
5492 867 : OnConflictActionState *onconfl = makeNode(OnConflictActionState);
5493 :
5494 : /* already exists if created by RETURNING processing above */
5495 867 : if (mtstate->ps.ps_ExprContext == NULL)
5496 452 : ExecAssignExprContext(estate, &mtstate->ps);
5497 :
5498 : /* action state for DO SELECT/UPDATE */
5499 867 : resultRelInfo->ri_onConflict = onconfl;
5500 :
5501 : /* lock strength for DO SELECT [FOR UPDATE/SHARE] */
5502 867 : onconfl->oc_LockStrength = node->onConflictLockStrength;
5503 :
5504 : /* initialize slot for the existing tuple */
5505 867 : onconfl->oc_Existing =
5506 867 : table_slot_create(resultRelInfo->ri_RelationDesc,
5507 867 : &mtstate->ps.state->es_tupleTable);
5508 :
5509 : /*
5510 : * For ON CONFLICT DO UPDATE, initialize target list and projection.
5511 : */
5512 867 : if (node->onConflictAction == ONCONFLICT_UPDATE)
5513 : {
5514 : ExprContext *econtext;
5515 : TupleDesc relationDesc;
5516 :
5517 647 : econtext = mtstate->ps.ps_ExprContext;
5518 647 : relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
5519 :
5520 : /*
5521 : * Create the tuple slot for the UPDATE SET projection. We want a
5522 : * slot of the table's type here, because the slot will be used to
5523 : * insert into the table, and for RETURNING processing - which may
5524 : * access system attributes.
5525 : */
5526 647 : onconfl->oc_ProjSlot =
5527 647 : table_slot_create(resultRelInfo->ri_RelationDesc,
5528 647 : &mtstate->ps.state->es_tupleTable);
5529 :
5530 : /* build UPDATE SET projection state */
5531 647 : onconfl->oc_ProjInfo =
5532 647 : ExecBuildUpdateProjection(node->onConflictSet,
5533 : true,
5534 : node->onConflictCols,
5535 : relationDesc,
5536 : econtext,
5537 : onconfl->oc_ProjSlot,
5538 : &mtstate->ps);
5539 : }
5540 :
5541 : /* initialize state to evaluate the WHERE clause, if any */
5542 867 : if (node->onConflictWhere)
5543 : {
5544 : ExprState *qualexpr;
5545 :
5546 203 : qualexpr = ExecInitQual((List *) node->onConflictWhere,
5547 : &mtstate->ps);
5548 203 : onconfl->oc_WhereClause = qualexpr;
5549 : }
5550 : }
5551 :
5552 : /*
5553 : * If needed, initialize the target range for FOR PORTION OF.
5554 : */
5555 75270 : if (node->forPortionOf)
5556 : {
5557 : ResultRelInfo *rootRelInfo;
5558 : TupleDesc tupDesc;
5559 : ForPortionOfExpr *forPortionOf;
5560 : Datum targetRange;
5561 : bool isNull;
5562 : ExprContext *econtext;
5563 : ExprState *exprState;
5564 : ForPortionOfState *fpoState;
5565 :
5566 794 : rootRelInfo = mtstate->resultRelInfo;
5567 794 : if (rootRelInfo->ri_RootResultRelInfo)
5568 56 : rootRelInfo = rootRelInfo->ri_RootResultRelInfo;
5569 :
5570 794 : tupDesc = rootRelInfo->ri_RelationDesc->rd_att;
5571 794 : forPortionOf = (ForPortionOfExpr *) node->forPortionOf;
5572 :
5573 : /* Eval the FOR PORTION OF target */
5574 794 : if (mtstate->ps.ps_ExprContext == NULL)
5575 774 : ExecAssignExprContext(estate, &mtstate->ps);
5576 794 : econtext = mtstate->ps.ps_ExprContext;
5577 :
5578 794 : exprState = ExecPrepareExpr((Expr *) forPortionOf->targetRange, estate);
5579 794 : targetRange = ExecEvalExpr(exprState, econtext, &isNull);
5580 :
5581 : /*
5582 : * FOR PORTION OF ... TO ... FROM should never give us a NULL target,
5583 : * but FOR PORTION OF (...) could.
5584 : */
5585 794 : if (isNull)
5586 16 : ereport(ERROR,
5587 : (errmsg("FOR PORTION OF target was null")),
5588 : executor_errposition(estate, forPortionOf->targetLocation));
5589 :
5590 : /* Create state for FOR PORTION OF operation */
5591 :
5592 778 : fpoState = makeNode(ForPortionOfState);
5593 778 : fpoState->fp_rangeName = forPortionOf->range_name;
5594 778 : fpoState->fp_rangeType = forPortionOf->rangeType;
5595 778 : fpoState->fp_rangeAttno = forPortionOf->rangeVar->varattno;
5596 778 : fpoState->fp_targetRange = targetRange;
5597 :
5598 : /* Initialize slot for the existing tuple */
5599 :
5600 778 : fpoState->fp_Existing =
5601 778 : table_slot_create(rootRelInfo->ri_RelationDesc,
5602 778 : &mtstate->ps.state->es_tupleTable);
5603 :
5604 : /* Create the tuple slot for INSERTing the temporal leftovers */
5605 :
5606 778 : fpoState->fp_Leftover =
5607 778 : ExecInitExtraTupleSlot(mtstate->ps.state, tupDesc, &TTSOpsVirtual);
5608 :
5609 778 : rootRelInfo->ri_forPortionOf = fpoState;
5610 :
5611 : /*
5612 : * Make sure the root relation has the FOR PORTION OF clause too. Each
5613 : * partition needs its own TupleTableSlot, since they can have
5614 : * different descriptors, so they'll use the root fpoState to
5615 : * initialize one if necessary.
5616 : */
5617 778 : if (node->rootRelation > 0)
5618 56 : mtstate->rootResultRelInfo->ri_forPortionOf = fpoState;
5619 :
5620 778 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
5621 56 : mtstate->mt_partition_tuple_routing == NULL)
5622 : {
5623 : /*
5624 : * We will need tuple routing to insert temporal leftovers. Since
5625 : * we are initializing things before ExecCrossPartitionUpdate
5626 : * runs, we must do everything it needs as well.
5627 : */
5628 56 : Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
5629 : MemoryContext oldcxt;
5630 :
5631 : /* Things built here have to last for the query duration. */
5632 56 : oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
5633 :
5634 56 : mtstate->mt_partition_tuple_routing =
5635 56 : ExecSetupPartitionTupleRouting(estate, rootRel);
5636 :
5637 : /*
5638 : * Before a partition's tuple can be re-routed, it must first be
5639 : * converted to the root's format, so we'll need a slot for
5640 : * storing such tuples.
5641 : */
5642 : Assert(mtstate->mt_root_tuple_slot == NULL);
5643 56 : mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
5644 :
5645 56 : MemoryContextSwitchTo(oldcxt);
5646 : }
5647 :
5648 : /*
5649 : * Don't free the ExprContext here because the result must last for
5650 : * the whole query.
5651 : */
5652 : }
5653 :
5654 : /*
5655 : * If we have any secondary relations in an UPDATE or DELETE, they need to
5656 : * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
5657 : * EvalPlanQual mechanism needs to be told about them. This also goes for
5658 : * the source relations in a MERGE. Locate the relevant ExecRowMarks.
5659 : */
5660 75254 : arowmarks = NIL;
5661 77130 : foreach(l, node->rowMarks)
5662 : {
5663 1876 : PlanRowMark *rc = lfirst_node(PlanRowMark, l);
5664 1876 : RangeTblEntry *rte = exec_rt_fetch(rc->rti, estate);
5665 : ExecRowMark *erm;
5666 : ExecAuxRowMark *aerm;
5667 :
5668 : /* ignore "parent" rowmarks; they are irrelevant at runtime */
5669 1876 : if (rc->isParent)
5670 94 : continue;
5671 :
5672 : /*
5673 : * Also ignore rowmarks belonging to child tables that have been
5674 : * pruned in ExecDoInitialPruning().
5675 : */
5676 1782 : if (rte->rtekind == RTE_RELATION &&
5677 1406 : !bms_is_member(rc->rti, estate->es_unpruned_relids))
5678 0 : continue;
5679 :
5680 : /* Find ExecRowMark and build ExecAuxRowMark */
5681 1782 : erm = ExecFindRowMark(estate, rc->rti, false);
5682 1782 : aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
5683 1782 : arowmarks = lappend(arowmarks, aerm);
5684 : }
5685 :
5686 : /* For a MERGE command, initialize its state */
5687 75254 : if (mtstate->operation == CMD_MERGE)
5688 1055 : ExecInitMerge(mtstate, estate);
5689 :
5690 75254 : EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
5691 :
5692 : /*
5693 : * If there are a lot of result relations, use a hash table to speed the
5694 : * lookups. If there are not a lot, a simple linear search is faster.
5695 : *
5696 : * It's not clear where the threshold is, but try 64 for starters. In a
5697 : * debugging build, use a small threshold so that we get some test
5698 : * coverage of both code paths.
5699 : */
5700 : #ifdef USE_ASSERT_CHECKING
5701 : #define MT_NRELS_HASH 4
5702 : #else
5703 : #define MT_NRELS_HASH 64
5704 : #endif
5705 75254 : if (nrels >= MT_NRELS_HASH)
5706 : {
5707 : HASHCTL hash_ctl;
5708 :
5709 0 : hash_ctl.keysize = sizeof(Oid);
5710 0 : hash_ctl.entrysize = sizeof(MTTargetRelLookup);
5711 0 : hash_ctl.hcxt = CurrentMemoryContext;
5712 0 : mtstate->mt_resultOidHash =
5713 0 : hash_create("ModifyTable target hash",
5714 : nrels, &hash_ctl,
5715 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
5716 0 : for (i = 0; i < nrels; i++)
5717 : {
5718 : Oid hashkey;
5719 : MTTargetRelLookup *mtlookup;
5720 : bool found;
5721 :
5722 0 : resultRelInfo = &mtstate->resultRelInfo[i];
5723 0 : hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
5724 : mtlookup = (MTTargetRelLookup *)
5725 0 : hash_search(mtstate->mt_resultOidHash, &hashkey,
5726 : HASH_ENTER, &found);
5727 : Assert(!found);
5728 0 : mtlookup->relationIndex = i;
5729 : }
5730 : }
5731 : else
5732 75254 : mtstate->mt_resultOidHash = NULL;
5733 :
5734 : /*
5735 : * Determine if the FDW supports batch insert and determine the batch size
5736 : * (a FDW may support batching, but it may be disabled for the
5737 : * server/table).
5738 : *
5739 : * We only do this for INSERT, so that for UPDATE/DELETE the batch size
5740 : * remains set to 0.
5741 : */
5742 75254 : if (operation == CMD_INSERT)
5743 : {
5744 : /* insert may only have one relation, inheritance is not expanded */
5745 : Assert(total_nrels == 1);
5746 56579 : resultRelInfo = mtstate->resultRelInfo;
5747 56579 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5748 56579 : resultRelInfo->ri_FdwRoutine != NULL &&
5749 88 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
5750 88 : resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
5751 : {
5752 88 : resultRelInfo->ri_BatchSize =
5753 88 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
5754 88 : Assert(resultRelInfo->ri_BatchSize >= 1);
5755 : }
5756 : else
5757 56491 : resultRelInfo->ri_BatchSize = 1;
5758 : }
5759 :
5760 : /*
5761 : * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
5762 : * to estate->es_auxmodifytables so that it will be run to completion by
5763 : * ExecPostprocessPlan. (It'd actually work fine to add the primary
5764 : * ModifyTable node too, but there's no need.) Note the use of lcons not
5765 : * lappend: we need later-initialized ModifyTable nodes to be shut down
5766 : * before earlier ones. This ensures that we don't throw away RETURNING
5767 : * rows that need to be seen by a later CTE subplan.
5768 : */
5769 75254 : if (!mtstate->canSetTag)
5770 652 : estate->es_auxmodifytables = lcons(mtstate,
5771 : estate->es_auxmodifytables);
5772 :
5773 75254 : return mtstate;
5774 : }
5775 :
5776 : /* ----------------------------------------------------------------
5777 : * ExecEndModifyTable
5778 : *
5779 : * Shuts down the plan.
5780 : *
5781 : * Returns nothing of interest.
5782 : * ----------------------------------------------------------------
5783 : */
5784 : void
5785 72177 : ExecEndModifyTable(ModifyTableState *node)
5786 : {
5787 : int i;
5788 :
5789 : /*
5790 : * Allow any FDWs to shut down
5791 : */
5792 145734 : for (i = 0; i < node->mt_nrels; i++)
5793 : {
5794 : int j;
5795 73557 : ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
5796 :
5797 73557 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5798 73461 : resultRelInfo->ri_FdwRoutine != NULL &&
5799 156 : resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
5800 156 : resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
5801 : resultRelInfo);
5802 :
5803 : /*
5804 : * Cleanup the initialized batch slots. This only matters for FDWs
5805 : * with batching, but the other cases will have ri_NumSlotsInitialized
5806 : * == 0.
5807 : */
5808 73585 : for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
5809 : {
5810 28 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
5811 28 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
5812 : }
5813 : }
5814 :
5815 : /*
5816 : * Close all the partitioned tables, leaf partitions, and their indices
5817 : * and release the slot used for tuple routing, if set.
5818 : */
5819 72177 : if (node->mt_partition_tuple_routing)
5820 : {
5821 3978 : ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
5822 :
5823 3978 : if (node->mt_root_tuple_slot)
5824 481 : ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
5825 : }
5826 :
5827 : /*
5828 : * Terminate EPQ execution if active
5829 : */
5830 72177 : EvalPlanQualEnd(&node->mt_epqstate);
5831 :
5832 : /*
5833 : * shut down subplan
5834 : */
5835 72177 : ExecEndNode(outerPlanState(node));
5836 72177 : }
5837 :
5838 : void
5839 0 : ExecReScanModifyTable(ModifyTableState *node)
5840 : {
5841 : /*
5842 : * Currently, we don't need to support rescan on ModifyTable nodes. The
5843 : * semantics of that would be a bit debatable anyway.
5844 : */
5845 0 : elog(ERROR, "ExecReScanModifyTable is not implemented");
5846 : }
|