Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeModifyTable.c
4 : * routines to handle ModifyTable nodes.
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/executor/nodeModifyTable.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : /* INTERFACE ROUTINES
16 : * ExecInitModifyTable - initialize the ModifyTable node
17 : * ExecModifyTable - retrieve the next tuple from the node
18 : * ExecEndModifyTable - shut down the ModifyTable node
19 : * ExecReScanModifyTable - rescan the ModifyTable node
20 : *
21 : * NOTES
22 : * The ModifyTable node receives input from its outerPlan, which is
23 : * the data to insert for INSERT cases, the changed columns' new
24 : * values plus row-locating info for UPDATE and MERGE cases, or just the
25 : * row-locating info for DELETE cases.
26 : *
27 : * The relation to modify can be an ordinary table, a foreign table, or a
28 : * view. If it's a view, either it has sufficient INSTEAD OF triggers or
29 : * this node executes only MERGE ... DO NOTHING. If the original MERGE
30 : * targeted a view not in one of those two categories, earlier processing
31 : * already pointed the ModifyTable result relation to an underlying
32 : * relation of that other view. This node does process
33 : * ri_WithCheckOptions, which may have expressions from those other,
34 : * automatically updatable views.
35 : *
36 : * MERGE runs a join between the source relation and the target table.
37 : * If any WHEN NOT MATCHED [BY TARGET] clauses are present, then the join
38 : * is an outer join that might output tuples without a matching target
39 : * tuple. In this case, any unmatched target tuples will have NULL
40 : * row-locating info, and only INSERT can be run. But for matched target
41 : * tuples, the row-locating info is used to determine the tuple to UPDATE
42 : * or DELETE. When all clauses are WHEN MATCHED or WHEN NOT MATCHED BY
43 : * SOURCE, all tuples produced by the join will include a matching target
44 : * tuple, so all tuples contain row-locating info.
45 : *
46 : * If the query specifies RETURNING, then the ModifyTable returns a
47 : * RETURNING tuple after completing each row insert, update, or delete.
48 : * It must be called again to continue the operation. Without RETURNING,
49 : * we just loop within the node until all the work is done, then
50 : * return NULL. This avoids useless call/return overhead.
51 : */
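/*
 * Editor's illustrative sketch (not part of nodeModifyTable.c): how a caller
 * consumes a ModifyTable node, per the NOTES above.  The real driver is
 * ExecutePlan(); the helper name and the "mtps" parameter are hypothetical,
 * while ExecProcNode() and TupIsNull() are the real executor interfaces.
 */
static void
ConsumeModifyTableSketch(PlanState *mtps)
{
	for (;;)
	{
		TupleTableSlot *slot = ExecProcNode(mtps);

		/*
		 * Without RETURNING, the node loops internally and the first call
		 * already reports completion; with RETURNING, each call yields one
		 * projected tuple until the work is done.
		 */
		if (TupIsNull(slot))
			break;

		/* ... hand the RETURNING tuple to the DestReceiver ... */
	}
}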
52 :
53 : #include "postgres.h"
54 :
55 : #include "access/htup_details.h"
56 : #include "access/tableam.h"
57 : #include "access/xact.h"
58 : #include "commands/trigger.h"
59 : #include "executor/execPartition.h"
60 : #include "executor/executor.h"
61 : #include "executor/nodeModifyTable.h"
62 : #include "foreign/fdwapi.h"
63 : #include "miscadmin.h"
64 : #include "nodes/nodeFuncs.h"
65 : #include "optimizer/optimizer.h"
66 : #include "rewrite/rewriteHandler.h"
67 : #include "storage/lmgr.h"
68 : #include "utils/builtins.h"
69 : #include "utils/datum.h"
70 : #include "utils/rel.h"
71 : #include "utils/snapmgr.h"
72 :
73 :
74 : typedef struct MTTargetRelLookup
75 : {
76 : Oid relationOid; /* hash key, must be first */
77 : int relationIndex; /* rel's index in resultRelInfo[] array */
78 : } MTTargetRelLookup;
79 :
80 : /*
81 : * Context struct for a ModifyTable operation, containing basic execution
82 : * state and some output variables populated by ExecUpdateAct() and
83 : * ExecDeleteAct() to report the result of their actions to callers.
84 : */
85 : typedef struct ModifyTableContext
86 : {
87 : /* Operation state */
88 : ModifyTableState *mtstate;
89 : EPQState *epqstate;
90 : EState *estate;
91 :
92 : /*
93 : * Slot containing tuple obtained from ModifyTable's subplan. Used to
94 : * access "junk" columns that are not going to be stored.
95 : */
96 : TupleTableSlot *planSlot;
97 :
98 : /*
99 : * Information about the changes that were made concurrently to a tuple
100 : * being updated or deleted
101 : */
102 : TM_FailureData tmfd;
103 :
104 : /*
105 : * The tuple deleted when doing a cross-partition UPDATE with a RETURNING
106 : * clause that refers to OLD columns (converted to the root's tuple
107 : * descriptor).
108 : */
109 : TupleTableSlot *cpDeletedSlot;
110 :
111 : /*
112 : * The tuple projected by the INSERT's RETURNING clause, when doing a
113 : * cross-partition UPDATE
114 : */
115 : TupleTableSlot *cpUpdateReturningSlot;
116 : } ModifyTableContext;
117 :
118 : /*
119 : * Context struct containing output data specific to UPDATE operations.
120 : */
121 : typedef struct UpdateContext
122 : {
123 : bool crossPartUpdate; /* was it a cross-partition update? */
124 : TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
125 :
126 : /*
127 : * Lock mode to acquire on the latest tuple version before performing
128 : * EvalPlanQual on it
129 : */
130 : LockTupleMode lockmode;
131 : } UpdateContext;
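/*
 * Editor's illustrative sketch (hypothetical helper name): both context
 * structs normally live on the stack of the ModifyTable driver and are
 * threaded through the per-row subroutines; ExecUpdateAct()/ExecDeleteAct()
 * report back through fields such as tmfd, crossPartUpdate and updateIndexes.
 * See ExecModifyTable() for the real setup.
 */
static ModifyTableContext
SetUpModifyTableContextSketch(ModifyTableState *node)
{
	ModifyTableContext context = {0};

	context.mtstate = node;
	context.epqstate = &node->mt_epqstate;
	context.estate = node->ps.state;
	/* planSlot is filled in once per row pulled from the subplan */
	/* tmfd is output-only: the Act routines fill it on concurrency conflicts */
	return context;
}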
132 :
133 :
134 : static void ExecBatchInsert(ModifyTableState *mtstate,
135 : ResultRelInfo *resultRelInfo,
136 : TupleTableSlot **slots,
137 : TupleTableSlot **planSlots,
138 : int numSlots,
139 : EState *estate,
140 : bool canSetTag);
141 : static void ExecPendingInserts(EState *estate);
142 : static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
143 : ResultRelInfo *sourcePartInfo,
144 : ResultRelInfo *destPartInfo,
145 : ItemPointer tupleid,
146 : TupleTableSlot *oldslot,
147 : TupleTableSlot *newslot);
148 : static bool ExecOnConflictUpdate(ModifyTableContext *context,
149 : ResultRelInfo *resultRelInfo,
150 : ItemPointer conflictTid,
151 : TupleTableSlot *excludedSlot,
152 : bool canSetTag,
153 : TupleTableSlot **returning);
154 : static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
155 : EState *estate,
156 : PartitionTupleRouting *proute,
157 : ResultRelInfo *targetRelInfo,
158 : TupleTableSlot *slot,
159 : ResultRelInfo **partRelInfo);
160 :
161 : static TupleTableSlot *ExecMerge(ModifyTableContext *context,
162 : ResultRelInfo *resultRelInfo,
163 : ItemPointer tupleid,
164 : HeapTuple oldtuple,
165 : bool canSetTag);
166 : static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
167 : static TupleTableSlot *ExecMergeMatched(ModifyTableContext *context,
168 : ResultRelInfo *resultRelInfo,
169 : ItemPointer tupleid,
170 : HeapTuple oldtuple,
171 : bool canSetTag,
172 : bool *matched);
173 : static TupleTableSlot *ExecMergeNotMatched(ModifyTableContext *context,
174 : ResultRelInfo *resultRelInfo,
175 : bool canSetTag);
176 :
177 :
178 : /*
179 : * Verify that the tuples to be produced by INSERT match the
180 : * target relation's rowtype
181 : *
182 : * We do this to guard against stale plans. If plan invalidation is
183 : * functioning properly then we should never get a failure here, but better
184 : * safe than sorry. Note that this is called after we have obtained lock
185 : * on the target rel, so the rowtype can't change underneath us.
186 : *
187 : * The plan output is represented by its targetlist, because that makes
188 : * handling the dropped-column case easier.
189 : *
190 : * We used to use this for UPDATE as well, but now the equivalent checks
191 : * are done in ExecBuildUpdateProjection.
192 : */
193 : static void
194 92076 : ExecCheckPlanOutput(Relation resultRel, List *targetList)
195 : {
196 92076 : TupleDesc resultDesc = RelationGetDescr(resultRel);
197 92076 : int attno = 0;
198 : ListCell *lc;
199 :
200 280804 : foreach(lc, targetList)
201 : {
202 188728 : TargetEntry *tle = (TargetEntry *) lfirst(lc);
203 : Form_pg_attribute attr;
204 :
205 : Assert(!tle->resjunk); /* caller removed junk items already */
206 :
207 188728 : if (attno >= resultDesc->natts)
208 0 : ereport(ERROR,
209 : (errcode(ERRCODE_DATATYPE_MISMATCH),
210 : errmsg("table row type and query-specified row type do not match"),
211 : errdetail("Query has too many columns.")));
212 188728 : attr = TupleDescAttr(resultDesc, attno);
213 188728 : attno++;
214 :
215 188728 : if (!attr->attisdropped)
216 : {
217 : /* Normal case: demand type match */
218 188112 : if (exprType((Node *) tle->expr) != attr->atttypid)
219 0 : ereport(ERROR,
220 : (errcode(ERRCODE_DATATYPE_MISMATCH),
221 : errmsg("table row type and query-specified row type do not match"),
222 : errdetail("Table has type %s at ordinal position %d, but query expects %s.",
223 : format_type_be(attr->atttypid),
224 : attno,
225 : format_type_be(exprType((Node *) tle->expr)))));
226 : }
227 : else
228 : {
229 : /*
230 : * For a dropped column, we can't check atttypid (it's likely 0).
231 : * In any case the planner has most likely inserted an INT4 null.
232 : * What we insist on is just *some* NULL constant.
233 : */
234 616 : if (!IsA(tle->expr, Const) ||
235 616 : !((Const *) tle->expr)->constisnull)
236 0 : ereport(ERROR,
237 : (errcode(ERRCODE_DATATYPE_MISMATCH),
238 : errmsg("table row type and query-specified row type do not match"),
239 : errdetail("Query provides a value for a dropped column at ordinal position %d.",
240 : attno)));
241 : }
242 : }
243 92076 : if (attno != resultDesc->natts)
244 0 : ereport(ERROR,
245 : (errcode(ERRCODE_DATATYPE_MISMATCH),
246 : errmsg("table row type and query-specified row type do not match"),
247 : errdetail("Query has too few columns.")));
248 92076 : }
249 :
250 : /*
251 : * ExecProcessReturning --- evaluate a RETURNING list
252 : *
253 : * context: context for the ModifyTable operation
254 : * resultRelInfo: current result rel
255 : * cmdType: operation/merge action performed (INSERT, UPDATE, or DELETE)
256 : * oldSlot: slot holding old tuple deleted or updated
257 : * newSlot: slot holding new tuple inserted or updated
258 : * planSlot: slot holding tuple returned by top subplan node
259 : *
260 : * Note: If oldSlot and newSlot are NULL, the FDW should have already provided
261 : * econtext's scan tuple and its old & new tuples are not needed (FDW direct-
262 : * modify is disabled if the RETURNING list refers to any OLD/NEW values).
263 : *
264 : * Returns a slot holding the result tuple
265 : */
266 : static TupleTableSlot *
267 7776 : ExecProcessReturning(ModifyTableContext *context,
268 : ResultRelInfo *resultRelInfo,
269 : CmdType cmdType,
270 : TupleTableSlot *oldSlot,
271 : TupleTableSlot *newSlot,
272 : TupleTableSlot *planSlot)
273 : {
274 7776 : EState *estate = context->estate;
275 7776 : ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
276 7776 : ExprContext *econtext = projectReturning->pi_exprContext;
277 :
278 : /* Make tuple and any needed join variables available to ExecProject */
279 7776 : switch (cmdType)
280 : {
281 6490 : case CMD_INSERT:
282 : case CMD_UPDATE:
283 : /* return new tuple by default */
284 6490 : if (newSlot)
285 6034 : econtext->ecxt_scantuple = newSlot;
286 6490 : break;
287 :
288 1286 : case CMD_DELETE:
289 : /* return old tuple by default */
290 1286 : if (oldSlot)
291 1048 : econtext->ecxt_scantuple = oldSlot;
292 1286 : break;
293 :
294 0 : default:
295 0 : elog(ERROR, "unrecognized commandType: %d", (int) cmdType);
296 : }
297 7776 : econtext->ecxt_outertuple = planSlot;
298 :
299 : /* Make old/new tuples available to ExecProject, if required */
300 7776 : if (oldSlot)
301 3576 : econtext->ecxt_oldtuple = oldSlot;
302 4200 : else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
303 168 : econtext->ecxt_oldtuple = ExecGetAllNullSlot(estate, resultRelInfo);
304 : else
305 4032 : econtext->ecxt_oldtuple = NULL; /* No references to OLD columns */
306 :
307 7776 : if (newSlot)
308 6034 : econtext->ecxt_newtuple = newSlot;
309 1742 : else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW)
310 132 : econtext->ecxt_newtuple = ExecGetAllNullSlot(estate, resultRelInfo);
311 : else
312 1610 : econtext->ecxt_newtuple = NULL; /* No references to NEW columns */
313 :
314 : /*
315 : * Tell ExecProject whether or not the OLD/NEW rows actually exist. This
316 : * information is required to evaluate ReturningExpr nodes and also in
317 : * ExecEvalSysVar() and ExecEvalWholeRowVar().
318 : */
319 7776 : if (oldSlot == NULL)
320 4200 : projectReturning->pi_state.flags |= EEO_FLAG_OLD_IS_NULL;
321 : else
322 3576 : projectReturning->pi_state.flags &= ~EEO_FLAG_OLD_IS_NULL;
323 :
324 7776 : if (newSlot == NULL)
325 1742 : projectReturning->pi_state.flags |= EEO_FLAG_NEW_IS_NULL;
326 : else
327 6034 : projectReturning->pi_state.flags &= ~EEO_FLAG_NEW_IS_NULL;
328 :
329 : /* Compute the RETURNING expressions */
330 7776 : return ExecProject(projectReturning);
331 : }
332 :
333 : /*
334 : * ExecCheckTupleVisible -- verify tuple is visible
335 : *
336 : * It would not be consistent with guarantees of the higher isolation levels to
337 : * proceed with avoiding insertion (taking speculative insertion's alternative
338 : * path) on the basis of another tuple that is not visible to our MVCC snapshot.
339 : * Check for the need to raise a serialization failure, and do so as necessary.
340 : */
341 : static void
342 5246 : ExecCheckTupleVisible(EState *estate,
343 : Relation rel,
344 : TupleTableSlot *slot)
345 : {
346 5246 : if (!IsolationUsesXactSnapshot())
347 5182 : return;
348 :
349 64 : if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
350 : {
351 : Datum xminDatum;
352 : TransactionId xmin;
353 : bool isnull;
354 :
355 40 : xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
356 : Assert(!isnull);
357 40 : xmin = DatumGetTransactionId(xminDatum);
358 :
359 : /*
360 : * We should not raise a serialization failure if the conflict is
361 : * against a tuple inserted by our own transaction, even if it's not
362 : * visible to our snapshot. (This would happen, for example, if
363 : * conflicting keys are proposed for insertion in a single command.)
364 : */
365 40 : if (!TransactionIdIsCurrentTransactionId(xmin))
366 20 : ereport(ERROR,
367 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
368 : errmsg("could not serialize access due to concurrent update")));
369 : }
370 : }
371 :
372 : /*
373 : * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
374 : */
375 : static void
376 212 : ExecCheckTIDVisible(EState *estate,
377 : ResultRelInfo *relinfo,
378 : ItemPointer tid,
379 : TupleTableSlot *tempSlot)
380 : {
381 212 : Relation rel = relinfo->ri_RelationDesc;
382 :
383 : /* Redundantly check isolation level */
384 212 : if (!IsolationUsesXactSnapshot())
385 148 : return;
386 :
387 64 : if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
388 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
389 64 : ExecCheckTupleVisible(estate, rel, tempSlot);
390 44 : ExecClearTuple(tempSlot);
391 : }
392 :
393 : /*
394 : * Initialize to compute stored generated columns for a tuple
395 : *
396 : * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI
397 : * or ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
398 : * If cmdtype == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
399 : *
400 : * Note: usually, a given query would need only one of ri_GeneratedExprsI and
401 : * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
402 : * cross-partition UPDATEs, since a partition might be the target of both
403 : * UPDATE and INSERT actions.
404 : */
405 : void
406 58822 : ExecInitStoredGenerated(ResultRelInfo *resultRelInfo,
407 : EState *estate,
408 : CmdType cmdtype)
409 : {
410 58822 : Relation rel = resultRelInfo->ri_RelationDesc;
411 58822 : TupleDesc tupdesc = RelationGetDescr(rel);
412 58822 : int natts = tupdesc->natts;
413 : ExprState **ri_GeneratedExprs;
414 : int ri_NumGeneratedNeeded;
415 : Bitmapset *updatedCols;
416 : MemoryContext oldContext;
417 :
418 : /* Nothing to do if no generated columns */
419 58822 : if (!(tupdesc->constr && tupdesc->constr->has_generated_stored))
420 57874 : return;
421 :
422 : /*
423 : * In an UPDATE, we can skip computing any generated columns that do not
424 : * depend on any UPDATE target column. But if there is a BEFORE ROW
425 : * UPDATE trigger, we cannot skip because the trigger might change more
426 : * columns.
427 : */
428 948 : if (cmdtype == CMD_UPDATE &&
429 226 : !(rel->trigdesc && rel->trigdesc->trig_update_before_row))
430 200 : updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
431 : else
432 748 : updatedCols = NULL;
433 :
434 : /*
435 : * Make sure these data structures are built in the per-query memory
436 : * context so they'll survive throughout the query.
437 : */
438 948 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
439 :
440 948 : ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
441 948 : ri_NumGeneratedNeeded = 0;
442 :
443 3658 : for (int i = 0; i < natts; i++)
444 : {
445 2710 : if (TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED)
446 : {
447 : Expr *expr;
448 :
449 : /* Fetch the GENERATED AS expression tree */
450 972 : expr = (Expr *) build_column_default(rel, i + 1);
451 972 : if (expr == NULL)
452 0 : elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
453 : i + 1, RelationGetRelationName(rel));
454 :
455 : /*
456 : * If it's an update with a known set of update target columns,
457 : * see if we can skip the computation.
458 : */
459 972 : if (updatedCols)
460 : {
461 206 : Bitmapset *attrs_used = NULL;
462 :
463 206 : pull_varattnos((Node *) expr, 1, &attrs_used);
464 :
465 206 : if (!bms_overlap(updatedCols, attrs_used))
466 24 : continue; /* need not update this column */
467 : }
468 :
469 : /* No luck, so prepare the expression for execution */
470 948 : ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
471 948 : ri_NumGeneratedNeeded++;
472 :
473 : /* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
474 948 : if (cmdtype == CMD_UPDATE)
475 208 : resultRelInfo->ri_extraUpdatedCols =
476 208 : bms_add_member(resultRelInfo->ri_extraUpdatedCols,
477 : i + 1 - FirstLowInvalidHeapAttributeNumber);
478 : }
479 : }
480 :
481 : /* Save in appropriate set of fields */
482 948 : if (cmdtype == CMD_UPDATE)
483 : {
484 : /* Don't call twice */
485 : Assert(resultRelInfo->ri_GeneratedExprsU == NULL);
486 :
487 226 : resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
488 226 : resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;
489 : }
490 : else
491 : {
492 : /* Don't call twice */
493 : Assert(resultRelInfo->ri_GeneratedExprsI == NULL);
494 :
495 722 : resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
496 722 : resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
497 : }
498 :
499 948 : MemoryContextSwitchTo(oldContext);
500 : }
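/*
 * Editor's illustrative sketch (hypothetical helper): attribute numbers are
 * stored in ri_extraUpdatedCols offset by FirstLowInvalidHeapAttributeNumber,
 * exactly as in the loop above, so a membership test must apply the same
 * offset.
 */
static bool
ColumnIsExtraUpdatedSketch(ResultRelInfo *resultRelInfo, AttrNumber attnum)
{
	return bms_is_member(attnum - FirstLowInvalidHeapAttributeNumber,
						 resultRelInfo->ri_extraUpdatedCols);
}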
501 :
502 : /*
503 : * Compute stored generated columns for a tuple
504 : */
505 : void
506 1260 : ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
507 : EState *estate, TupleTableSlot *slot,
508 : CmdType cmdtype)
509 : {
510 1260 : Relation rel = resultRelInfo->ri_RelationDesc;
511 1260 : TupleDesc tupdesc = RelationGetDescr(rel);
512 1260 : int natts = tupdesc->natts;
513 1260 : ExprContext *econtext = GetPerTupleExprContext(estate);
514 : ExprState **ri_GeneratedExprs;
515 : MemoryContext oldContext;
516 : Datum *values;
517 : bool *nulls;
518 :
519 : /* We should not be called unless this is true */
520 : Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);
521 :
522 : /*
523 : * Initialize the expressions if we didn't already, and check whether we
524 : * can exit early because nothing needs to be computed.
525 : */
526 1260 : if (cmdtype == CMD_UPDATE)
527 : {
528 262 : if (resultRelInfo->ri_GeneratedExprsU == NULL)
529 200 : ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
530 262 : if (resultRelInfo->ri_NumGeneratedNeededU == 0)
531 18 : return;
532 244 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
533 : }
534 : else
535 : {
536 998 : if (resultRelInfo->ri_GeneratedExprsI == NULL)
537 722 : ExecInitStoredGenerated(resultRelInfo, estate, cmdtype);
538 : /* Early exit is impossible given the prior Assert */
539 : Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
540 998 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
541 : }
542 :
543 1242 : oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
544 :
545 1242 : values = palloc(sizeof(*values) * natts);
546 1242 : nulls = palloc(sizeof(*nulls) * natts);
547 :
548 1242 : slot_getallattrs(slot);
549 1242 : memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);
550 :
551 4668 : for (int i = 0; i < natts; i++)
552 : {
553 3438 : CompactAttribute *attr = TupleDescCompactAttr(tupdesc, i);
554 :
555 3438 : if (ri_GeneratedExprs[i])
556 : {
557 : Datum val;
558 : bool isnull;
559 :
560 : Assert(TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED);
561 :
562 1264 : econtext->ecxt_scantuple = slot;
563 :
564 1264 : val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);
565 :
566 : /*
567 : * We must make a copy of val as we have no guarantees about where
568 : * memory for a pass-by-reference Datum is located.
569 : */
570 1252 : if (!isnull)
571 1210 : val = datumCopy(val, attr->attbyval, attr->attlen);
572 :
573 1252 : values[i] = val;
574 1252 : nulls[i] = isnull;
575 : }
576 : else
577 : {
578 2174 : if (!nulls[i])
579 2130 : values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
580 : }
581 : }
582 :
583 1230 : ExecClearTuple(slot);
584 1230 : memcpy(slot->tts_values, values, sizeof(*values) * natts);
585 1230 : memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
586 1230 : ExecStoreVirtualTuple(slot);
587 1230 : ExecMaterializeSlot(slot);
588 :
589 1230 : MemoryContextSwitchTo(oldContext);
590 : }
591 :
592 : /*
593 : * ExecInitInsertProjection
594 : * Do one-time initialization of projection data for INSERT tuples.
595 : *
596 : * INSERT queries may need a projection to filter out junk attrs in the tlist.
597 : *
598 : * This is also a convenient place to verify that the
599 : * output of an INSERT matches the target table.
600 : */
601 : static void
602 91096 : ExecInitInsertProjection(ModifyTableState *mtstate,
603 : ResultRelInfo *resultRelInfo)
604 : {
605 91096 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
606 91096 : Plan *subplan = outerPlan(node);
607 91096 : EState *estate = mtstate->ps.state;
608 91096 : List *insertTargetList = NIL;
609 91096 : bool need_projection = false;
610 : ListCell *l;
611 :
612 : /* Extract non-junk columns of the subplan's result tlist. */
613 277338 : foreach(l, subplan->targetlist)
614 : {
615 186242 : TargetEntry *tle = (TargetEntry *) lfirst(l);
616 :
617 186242 : if (!tle->resjunk)
618 186242 : insertTargetList = lappend(insertTargetList, tle);
619 : else
620 0 : need_projection = true;
621 : }
622 :
623 : /*
624 : * The junk-free list must produce a tuple suitable for the result
625 : * relation.
626 : */
627 91096 : ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);
628 :
629 : /* We'll need a slot matching the table's format. */
630 91096 : resultRelInfo->ri_newTupleSlot =
631 91096 : table_slot_create(resultRelInfo->ri_RelationDesc,
632 : &estate->es_tupleTable);
633 :
634 : /* Build ProjectionInfo if needed (it probably isn't). */
635 91096 : if (need_projection)
636 : {
637 0 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
638 :
639 : /* need an expression context to do the projection */
640 0 : if (mtstate->ps.ps_ExprContext == NULL)
641 0 : ExecAssignExprContext(estate, &mtstate->ps);
642 :
643 0 : resultRelInfo->ri_projectNew =
644 0 : ExecBuildProjectionInfo(insertTargetList,
645 : mtstate->ps.ps_ExprContext,
646 : resultRelInfo->ri_newTupleSlot,
647 : &mtstate->ps,
648 : relDesc);
649 : }
650 :
651 91096 : resultRelInfo->ri_projectNewInfoValid = true;
652 91096 : }
653 :
654 : /*
655 : * ExecInitUpdateProjection
656 : * Do one-time initialization of projection data for UPDATE tuples.
657 : *
658 : * UPDATE always needs a projection, because (1) there's always some junk
659 : * attrs, and (2) we may need to merge values of not-updated columns from
660 : * the old tuple into the final tuple. In UPDATE, the tuple arriving from
661 : * the subplan contains only new values for the changed columns, plus row
662 : * identity info in the junk attrs.
663 : *
664 : * This is "one-time" for any given result rel, but we might touch more than
665 : * one result rel in the course of an inherited UPDATE, and each one needs
666 : * its own projection due to possible column order variation.
667 : *
668 : * This is also a convenient place to verify that the output of an UPDATE
669 : * matches the target table (ExecBuildUpdateProjection does that).
670 : */
671 : static void
672 12660 : ExecInitUpdateProjection(ModifyTableState *mtstate,
673 : ResultRelInfo *resultRelInfo)
674 : {
675 12660 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
676 12660 : Plan *subplan = outerPlan(node);
677 12660 : EState *estate = mtstate->ps.state;
678 12660 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
679 : int whichrel;
680 : List *updateColnos;
681 :
682 : /*
683 : * Usually, mt_lastResultIndex matches the target rel. If it happens not
684 : * to, we can get the index the hard way with an integer division.
685 : */
686 12660 : whichrel = mtstate->mt_lastResultIndex;
687 12660 : if (resultRelInfo != mtstate->resultRelInfo + whichrel)
688 : {
689 0 : whichrel = resultRelInfo - mtstate->resultRelInfo;
690 : Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
691 : }
692 :
693 12660 : updateColnos = (List *) list_nth(node->updateColnosLists, whichrel);
694 :
695 : /*
696 : * For UPDATE, we use the old tuple to fill up missing values in the tuple
697 : * produced by the subplan to get the new tuple. We need two slots, both
698 : * matching the table's desired format.
699 : */
700 12660 : resultRelInfo->ri_oldTupleSlot =
701 12660 : table_slot_create(resultRelInfo->ri_RelationDesc,
702 : &estate->es_tupleTable);
703 12660 : resultRelInfo->ri_newTupleSlot =
704 12660 : table_slot_create(resultRelInfo->ri_RelationDesc,
705 : &estate->es_tupleTable);
706 :
707 : /* need an expression context to do the projection */
708 12660 : if (mtstate->ps.ps_ExprContext == NULL)
709 11326 : ExecAssignExprContext(estate, &mtstate->ps);
710 :
711 12660 : resultRelInfo->ri_projectNew =
712 12660 : ExecBuildUpdateProjection(subplan->targetlist,
713 : false, /* subplan did the evaluation */
714 : updateColnos,
715 : relDesc,
716 : mtstate->ps.ps_ExprContext,
717 : resultRelInfo->ri_newTupleSlot,
718 : &mtstate->ps);
719 :
720 12660 : resultRelInfo->ri_projectNewInfoValid = true;
721 12660 : }
722 :
723 : /*
724 : * ExecGetInsertNewTuple
725 : * This prepares a "new" tuple ready to be inserted into given result
726 : * relation, by removing any junk columns of the plan's output tuple
727 : * and (if necessary) coercing the tuple to the right tuple format.
728 : */
729 : static TupleTableSlot *
730 11420742 : ExecGetInsertNewTuple(ResultRelInfo *relinfo,
731 : TupleTableSlot *planSlot)
732 : {
733 11420742 : ProjectionInfo *newProj = relinfo->ri_projectNew;
734 : ExprContext *econtext;
735 :
736 : /*
737 : * If there's no projection to be done, just make sure the slot is of the
738 : * right type for the target rel. If the planSlot is the right type we
739 : * can use it as-is, else copy the data into ri_newTupleSlot.
740 : */
741 11420742 : if (newProj == NULL)
742 : {
743 11420742 : if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
744 : {
745 10649732 : ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
746 10649732 : return relinfo->ri_newTupleSlot;
747 : }
748 : else
749 771010 : return planSlot;
750 : }
751 :
752 : /*
753 : * Else project; since the projection output slot is ri_newTupleSlot, this
754 : * will also fix any slot-type problem.
755 : *
756 : * Note: currently, this is dead code, because INSERT cases don't receive
757 : * any junk columns so there's never a projection to be done.
758 : */
759 0 : econtext = newProj->pi_exprContext;
760 0 : econtext->ecxt_outertuple = planSlot;
761 0 : return ExecProject(newProj);
762 : }
763 :
764 : /*
765 : * ExecGetUpdateNewTuple
766 : * This prepares a "new" tuple by combining an UPDATE subplan's output
767 : * tuple (which contains values of changed columns) with unchanged
768 : * columns taken from the old tuple.
769 : *
770 : * The subplan tuple might also contain junk columns, which are ignored.
771 : * Note that the projection also ensures we have a slot of the right type.
772 : */
773 : TupleTableSlot *
774 309230 : ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
775 : TupleTableSlot *planSlot,
776 : TupleTableSlot *oldSlot)
777 : {
778 309230 : ProjectionInfo *newProj = relinfo->ri_projectNew;
779 : ExprContext *econtext;
780 :
781 : /* Use a few extra Asserts to protect against outside callers */
782 : Assert(relinfo->ri_projectNewInfoValid);
783 : Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
784 : Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));
785 :
786 309230 : econtext = newProj->pi_exprContext;
787 309230 : econtext->ecxt_outertuple = planSlot;
788 309230 : econtext->ecxt_scantuple = oldSlot;
789 309230 : return ExecProject(newProj);
790 : }
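/*
 * Editor's illustrative sketch (hypothetical helper): a typical UPDATE caller
 * first fetches the current version of the row into ri_oldTupleSlot and then
 * lets the projection above merge in the changed columns coming from the
 * subplan row.  See ExecModifyTable() and ExecUpdate() for the real call
 * sites.
 */
static TupleTableSlot *
BuildUpdateNewTupleSketch(ResultRelInfo *relinfo, ItemPointer tupleid,
						  TupleTableSlot *planSlot)
{
	TupleTableSlot *oldSlot = relinfo->ri_oldTupleSlot;

	if (!table_tuple_fetch_row_version(relinfo->ri_RelationDesc,
									   tupleid, SnapshotAny, oldSlot))
		elog(ERROR, "failed to fetch tuple being updated");

	return ExecGetUpdateNewTuple(relinfo, planSlot, oldSlot);
}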
791 :
792 : /* ----------------------------------------------------------------
793 : * ExecInsert
794 : *
795 : * For INSERT, we have to insert the tuple into the target relation
796 : * (or partition thereof) and insert appropriate tuples into the index
797 : * relations.
798 : *
799 : * slot contains the new tuple value to be stored.
800 : *
801 : * Returns RETURNING result if any, otherwise NULL.
802 : * *inserted_tuple is the tuple that's effectively inserted;
803 : * *insert_destrel is the relation where it was inserted.
804 : * These are only set on success.
805 : *
806 : * This may change the currently active tuple conversion map in
807 : * mtstate->mt_transition_capture, so the callers must take care to
808 : * save the previous value to avoid losing track of it.
809 : * ----------------------------------------------------------------
810 : */
811 : static TupleTableSlot *
812 11423458 : ExecInsert(ModifyTableContext *context,
813 : ResultRelInfo *resultRelInfo,
814 : TupleTableSlot *slot,
815 : bool canSetTag,
816 : TupleTableSlot **inserted_tuple,
817 : ResultRelInfo **insert_destrel)
818 : {
819 11423458 : ModifyTableState *mtstate = context->mtstate;
820 11423458 : EState *estate = context->estate;
821 : Relation resultRelationDesc;
822 11423458 : List *recheckIndexes = NIL;
823 11423458 : TupleTableSlot *planSlot = context->planSlot;
824 11423458 : TupleTableSlot *result = NULL;
825 : TransitionCaptureState *ar_insert_trig_tcs;
826 11423458 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
827 11423458 : OnConflictAction onconflict = node->onConflictAction;
828 11423458 : PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
829 : MemoryContext oldContext;
830 :
831 : /*
832 : * If the input result relation is a partitioned table, find the leaf
833 : * partition to insert the tuple into.
834 : */
835 11423458 : if (proute)
836 : {
837 : ResultRelInfo *partRelInfo;
838 :
839 722210 : slot = ExecPrepareTupleRouting(mtstate, estate, proute,
840 : resultRelInfo, slot,
841 : &partRelInfo);
842 722006 : resultRelInfo = partRelInfo;
843 : }
844 :
845 11423254 : ExecMaterializeSlot(slot);
846 :
847 11423254 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
848 :
849 : /*
850 : * Open the table's indexes, if we have not done so already, so that we
851 : * can add new index entries for the inserted tuple.
852 : */
853 11423254 : if (resultRelationDesc->rd_rel->relhasindex &&
854 3009908 : resultRelInfo->ri_IndexRelationDescs == NULL)
855 31812 : ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);
856 :
857 : /*
858 : * BEFORE ROW INSERT Triggers.
859 : *
860 : * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
861 : * INSERT ... ON CONFLICT statement. We cannot check for constraint
862 : * violations before firing these triggers, because they can change the
863 : * values to insert. Also, they can run arbitrary user-defined code with
864 : * side-effects that we can't cancel by just not inserting the tuple.
865 : */
866 11423254 : if (resultRelInfo->ri_TrigDesc &&
867 75316 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)
868 : {
869 : /* Flush any pending inserts, so rows are visible to the triggers */
870 2120 : if (estate->es_insert_pending_result_relations != NIL)
871 6 : ExecPendingInserts(estate);
872 :
873 2120 : if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
874 200 : return NULL; /* "do nothing" */
875 : }
876 :
877 : /* INSTEAD OF ROW INSERT Triggers */
878 11422938 : if (resultRelInfo->ri_TrigDesc &&
879 75000 : resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
880 : {
881 168 : if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
882 6 : return NULL; /* "do nothing" */
883 : }
884 11422770 : else if (resultRelInfo->ri_FdwRoutine)
885 : {
886 : /*
887 : * GENERATED expressions might reference the tableoid column, so
888 : * (re-)initialize tts_tableOid before evaluating them.
889 : */
890 2014 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
891 :
892 : /*
893 : * Compute stored generated columns
894 : */
895 2014 : if (resultRelationDesc->rd_att->constr &&
896 366 : resultRelationDesc->rd_att->constr->has_generated_stored)
897 8 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
898 : CMD_INSERT);
899 :
900 : /*
901 : * If the FDW supports batching, and batching is requested, accumulate
902 : * rows and insert them in batches. Otherwise use the per-row inserts.
903 : */
904 2014 : if (resultRelInfo->ri_BatchSize > 1)
905 : {
906 288 : bool flushed = false;
907 :
908 : /*
909 : * When we've reached the desired batch size, perform the
910 : * insertion.
911 : */
912 288 : if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
913 : {
914 20 : ExecBatchInsert(mtstate, resultRelInfo,
915 : resultRelInfo->ri_Slots,
916 : resultRelInfo->ri_PlanSlots,
917 : resultRelInfo->ri_NumSlots,
918 : estate, canSetTag);
919 20 : flushed = true;
920 : }
921 :
922 288 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
923 :
924 288 : if (resultRelInfo->ri_Slots == NULL)
925 : {
926 56 : resultRelInfo->ri_Slots = palloc(sizeof(TupleTableSlot *) *
927 28 : resultRelInfo->ri_BatchSize);
928 28 : resultRelInfo->ri_PlanSlots = palloc(sizeof(TupleTableSlot *) *
929 28 : resultRelInfo->ri_BatchSize);
930 : }
931 :
932 : /*
933 : * Initialize the batch slots. We don't know how many slots will
934 : * be needed, so we initialize them as the batch grows, and we
935 : * keep them across batches. To mitigate an inefficiency in how
936 : * resource owner handles objects with many references (as with
937 : * many slots all referencing the same tuple descriptor) we copy
938 : * the appropriate tuple descriptor for each slot.
939 : */
940 288 : if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
941 : {
942 142 : TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
943 : TupleDesc plan_tdesc =
944 142 : CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
945 :
946 284 : resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
947 142 : MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
948 :
949 284 : resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
950 142 : MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
951 :
952 : /* remember how many batch slots we initialized */
953 142 : resultRelInfo->ri_NumSlotsInitialized++;
954 : }
955 :
956 288 : ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
957 : slot);
958 :
959 288 : ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
960 : planSlot);
961 :
962 : /*
963 : * If these are the first tuples stored in the buffers, add the
964 : * target rel and the mtstate to the
965 : * es_insert_pending_result_relations and
966 : * es_insert_pending_modifytables lists respectively, except in
967 : * the case where flushing was done above, in which case they
968 : * would already have been added to the lists, so no need to do
969 : * this.
970 : */
971 288 : if (resultRelInfo->ri_NumSlots == 0 && !flushed)
972 : {
973 : Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
974 : resultRelInfo));
975 36 : estate->es_insert_pending_result_relations =
976 36 : lappend(estate->es_insert_pending_result_relations,
977 : resultRelInfo);
978 36 : estate->es_insert_pending_modifytables =
979 36 : lappend(estate->es_insert_pending_modifytables, mtstate);
980 : }
981 : Assert(list_member_ptr(estate->es_insert_pending_result_relations,
982 : resultRelInfo));
983 :
984 288 : resultRelInfo->ri_NumSlots++;
985 :
986 288 : MemoryContextSwitchTo(oldContext);
987 :
988 288 : return NULL;
989 : }
990 :
991 : /*
992 : * insert into foreign table: let the FDW do it
993 : */
994 1726 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
995 : resultRelInfo,
996 : slot,
997 : planSlot);
998 :
999 1720 : if (slot == NULL) /* "do nothing" */
1000 4 : return NULL;
1001 :
1002 : /*
1003 : * AFTER ROW Triggers or RETURNING expressions might reference the
1004 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
1005 : * them. (This covers the case where the FDW replaced the slot.)
1006 : */
1007 1716 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1008 : }
1009 : else
1010 : {
1011 : WCOKind wco_kind;
1012 :
1013 : /*
1014 : * Constraints and GENERATED expressions might reference the tableoid
1015 : * column, so (re-)initialize tts_tableOid before evaluating them.
1016 : */
1017 11420756 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1018 :
1019 : /*
1020 : * Compute stored generated columns
1021 : */
1022 11420756 : if (resultRelationDesc->rd_att->constr &&
1023 3239348 : resultRelationDesc->rd_att->constr->has_generated_stored)
1024 948 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
1025 : CMD_INSERT);
1026 :
1027 : /*
1028 : * Check any RLS WITH CHECK policies.
1029 : *
1030 : * Normally we should check INSERT policies. But if the insert is the
1031 : * result of a partition key update that moved the tuple to a new
1032 : * partition, we should instead check UPDATE policies, because we are
1033 : * executing policies defined on the target table, and not those
1034 : * defined on the child partitions.
1035 : *
1036 : * If we're running MERGE, we refer to the action that we're executing
1037 : * to know if we're doing an INSERT or UPDATE to a partition table.
1038 : */
1039 11420744 : if (mtstate->operation == CMD_UPDATE)
1040 764 : wco_kind = WCO_RLS_UPDATE_CHECK;
1041 11419980 : else if (mtstate->operation == CMD_MERGE)
1042 1712 : wco_kind = (mtstate->mt_merge_action->mas_action->commandType == CMD_UPDATE) ?
1043 1712 : WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
1044 : else
1045 11418268 : wco_kind = WCO_RLS_INSERT_CHECK;
1046 :
1047 : /*
1048 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
1049 : * we are looking for at this point.
1050 : */
1051 11420744 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1052 552 : ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
1053 :
1054 : /*
1055 : * Check the constraints of the tuple.
1056 : */
1057 11420570 : if (resultRelationDesc->rd_att->constr)
1058 3239252 : ExecConstraints(resultRelInfo, slot, estate);
1059 :
1060 : /*
1061 : * Also check the tuple against the partition constraint, if there is
1062 : * one; except that if we got here via tuple-routing, we don't need to
1063 : * if there's no BR trigger defined on the partition.
1064 : */
1065 11419936 : if (resultRelationDesc->rd_rel->relispartition &&
1066 726246 : (resultRelInfo->ri_RootResultRelInfo == NULL ||
1067 721428 : (resultRelInfo->ri_TrigDesc &&
1068 1598 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
1069 5014 : ExecPartitionCheck(resultRelInfo, slot, estate, true);
1070 :
1071 11419768 : if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
1072 4112 : {
1073 : /* Perform a speculative insertion. */
1074 : uint32 specToken;
1075 : ItemPointerData conflictTid;
1076 : ItemPointerData invalidItemPtr;
1077 : bool specConflict;
1078 : List *arbiterIndexes;
1079 :
1080 9542 : ItemPointerSetInvalid(&invalidItemPtr);
1081 9542 : arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
1082 :
1083 : /*
1084 : * Do a non-conclusive check for conflicts first.
1085 : *
1086 : * We're not holding any locks yet, so this doesn't guarantee that
1087 : * the later insert won't conflict. But it avoids leaving behind
1088 : * a lot of canceled speculative insertions, if you run a lot of
1089 : * INSERT ON CONFLICT statements that do conflict.
1090 : *
1091 : * We loop back here if we find a conflict below, either during
1092 : * the pre-check, or when we re-check after inserting the tuple
1093 : * speculatively. Better allow interrupts in case some bug makes
1094 : * this an infinite loop.
1095 : */
1096 9552 : vlock:
1097 9552 : CHECK_FOR_INTERRUPTS();
1098 9552 : specConflict = false;
1099 9552 : if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
1100 : &conflictTid, &invalidItemPtr,
1101 : arbiterIndexes))
1102 : {
1103 : /* committed conflict tuple found */
1104 5418 : if (onconflict == ONCONFLICT_UPDATE)
1105 : {
1106 : /*
1107 : * In case of ON CONFLICT DO UPDATE, execute the UPDATE
1108 : * part. Be prepared to retry if the UPDATE fails because
1109 : * of another concurrent UPDATE/DELETE to the conflict
1110 : * tuple.
1111 : */
1112 5206 : TupleTableSlot *returning = NULL;
1113 :
1114 5206 : if (ExecOnConflictUpdate(context, resultRelInfo,
1115 : &conflictTid, slot, canSetTag,
1116 : &returning))
1117 : {
1118 5128 : InstrCountTuples2(&mtstate->ps, 1);
1119 5128 : return returning;
1120 : }
1121 : else
1122 0 : goto vlock;
1123 : }
1124 : else
1125 : {
1126 : /*
1127 : * In case of ON CONFLICT DO NOTHING, do nothing. However,
1128 : * verify that the tuple is visible to the executor's MVCC
1129 : * snapshot at higher isolation levels.
1130 : *
1131 : * Using ExecGetReturningSlot() to store the tuple for the
1132 : * recheck isn't that pretty, but we can't trivially use
1133 : * the input slot, because it might not be of a compatible
1134 : * type. As there's no conflicting usage of
1135 : * ExecGetReturningSlot() in the DO NOTHING case...
1136 : */
1137 : Assert(onconflict == ONCONFLICT_NOTHING);
1138 212 : ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
1139 : ExecGetReturningSlot(estate, resultRelInfo));
1140 192 : InstrCountTuples2(&mtstate->ps, 1);
1141 192 : return NULL;
1142 : }
1143 : }
1144 :
1145 : /*
1146 : * Before we start insertion proper, acquire our "speculative
1147 : * insertion lock". Others can use that to wait for us to decide
1148 : * if we're going to go ahead with the insertion, instead of
1149 : * waiting for the whole transaction to complete.
1150 : */
1151 4128 : specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
1152 :
1153 : /* insert the tuple, with the speculative token */
1154 4128 : table_tuple_insert_speculative(resultRelationDesc, slot,
1155 : estate->es_output_cid,
1156 : 0,
1157 : NULL,
1158 : specToken);
1159 :
1160 : /* insert index entries for tuple */
1161 4128 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1162 : slot, estate, false, true,
1163 : &specConflict,
1164 : arbiterIndexes,
1165 : false);
1166 :
1167 : /* adjust the tuple's state accordingly */
1168 4122 : table_tuple_complete_speculative(resultRelationDesc, slot,
1169 4122 : specToken, !specConflict);
1170 :
1171 : /*
1172 : * Wake up anyone waiting for our decision. They will re-check
1173 : * the tuple, see that it's no longer speculative, and wait on our
1174 : * XID as if this was a regularly inserted tuple all along. Or if
1175 : * we killed the tuple, they will see it's dead, and proceed as if
1176 : * the tuple never existed.
1177 : */
1178 4122 : SpeculativeInsertionLockRelease(GetCurrentTransactionId());
1179 :
1180 : /*
1181 : * If there was a conflict, start from the beginning. We'll do
1182 : * the pre-check again, which will now find the conflicting tuple
1183 : * (unless it aborts before we get there).
1184 : */
1185 4122 : if (specConflict)
1186 : {
1187 10 : list_free(recheckIndexes);
1188 10 : goto vlock;
1189 : }
1190 :
1191 : /* Since there was no insertion conflict, we're done */
1192 : }
1193 : else
1194 : {
1195 : /* insert the tuple normally */
1196 11410226 : table_tuple_insert(resultRelationDesc, slot,
1197 : estate->es_output_cid,
1198 : 0, NULL);
1199 :
1200 : /* insert index entries for tuple */
1201 11410190 : if (resultRelInfo->ri_NumIndices > 0)
1202 2999624 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1203 : slot, estate, false,
1204 : false, NULL, NIL,
1205 : false);
1206 : }
1207 : }
1208 :
1209 11415602 : if (canSetTag)
1210 11414430 : (estate->es_processed)++;
1211 :
1212 : /*
1213 : * If this insert is the result of a partition key update that moved the
1214 : * tuple to a new partition, put this row into the transition NEW TABLE,
1215 : * if there is one. We need to do this separately for DELETE and INSERT
1216 : * because they happen on different tables.
1217 : */
1218 11415602 : ar_insert_trig_tcs = mtstate->mt_transition_capture;
1219 11415602 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
1220 42 : && mtstate->mt_transition_capture->tcs_update_new_table)
1221 : {
1222 42 : ExecARUpdateTriggers(estate, resultRelInfo,
1223 : NULL, NULL,
1224 : NULL,
1225 : NULL,
1226 : slot,
1227 : NULL,
1228 42 : mtstate->mt_transition_capture,
1229 : false);
1230 :
1231 : /*
1232 : * We've already captured the NEW TABLE row, so make sure any AR
1233 : * INSERT trigger fired below doesn't capture it again.
1234 : */
1235 42 : ar_insert_trig_tcs = NULL;
1236 : }
1237 :
1238 : /* AFTER ROW INSERT Triggers */
1239 11415602 : ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
1240 : ar_insert_trig_tcs);
1241 :
1242 11415602 : list_free(recheckIndexes);
1243 :
1244 : /*
1245 : * Check any WITH CHECK OPTION constraints from parent views. We are
1246 : * required to do this after testing all constraints and uniqueness
1247 : * violations per the SQL spec, so we do it after actually inserting the
1248 : * record into the heap and all indexes.
1249 : *
1250 : * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
1251 : * tuple will never be seen, if it violates the WITH CHECK OPTION.
1252 : *
1253 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1254 : * are looking for at this point.
1255 : */
1256 11415602 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1257 364 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1258 :
1259 : /* Process RETURNING if present */
1260 11415456 : if (resultRelInfo->ri_projectReturning)
1261 : {
1262 3550 : TupleTableSlot *oldSlot = NULL;
1263 :
1264 : /*
1265 : * If this is part of a cross-partition UPDATE, and the RETURNING list
1266 : * refers to any OLD columns, ExecDelete() will have saved the tuple
1267 : * deleted from the original partition, which we must use here to
1268 : * compute the OLD column values. Otherwise, all OLD column values
1269 : * will be NULL.
1270 : */
1271 3550 : if (context->cpDeletedSlot)
1272 : {
1273 : TupleConversionMap *tupconv_map;
1274 :
1275 : /*
1276 : * Convert the OLD tuple to the new partition's format/slot, if
1277 : * needed. Note that ExecDelete() already converted it to the
1278 : * root table's format/slot.
1279 : */
1280 44 : oldSlot = context->cpDeletedSlot;
1281 44 : tupconv_map = ExecGetRootToChildMap(resultRelInfo, estate);
1282 44 : if (tupconv_map != NULL)
1283 : {
1284 14 : oldSlot = execute_attr_map_slot(tupconv_map->attrMap,
1285 : oldSlot,
1286 : ExecGetReturningSlot(estate,
1287 : resultRelInfo));
1288 :
1289 14 : oldSlot->tts_tableOid = context->cpDeletedSlot->tts_tableOid;
1290 14 : ItemPointerCopy(&context->cpDeletedSlot->tts_tid, &oldSlot->tts_tid);
1291 : }
1292 : }
1293 :
1294 3550 : result = ExecProcessReturning(context, resultRelInfo, CMD_INSERT,
1295 : oldSlot, slot, planSlot);
1296 :
1297 : /*
1298 : * For a cross-partition UPDATE, release the old tuple, first making
1299 : * sure that the result slot has a local copy of any pass-by-reference
1300 : * values.
1301 : */
1302 3538 : if (context->cpDeletedSlot)
1303 : {
1304 44 : ExecMaterializeSlot(result);
1305 44 : ExecClearTuple(oldSlot);
1306 44 : if (context->cpDeletedSlot != oldSlot)
1307 14 : ExecClearTuple(context->cpDeletedSlot);
1308 44 : context->cpDeletedSlot = NULL;
1309 : }
1310 : }
1311 :
1312 11415444 : if (inserted_tuple)
1313 790 : *inserted_tuple = slot;
1314 11415444 : if (insert_destrel)
1315 790 : *insert_destrel = resultRelInfo;
1316 :
1317 11415444 : return result;
1318 : }
1319 :
1320 : /* ----------------------------------------------------------------
1321 : * ExecBatchInsert
1322 : *
1323 : * Insert multiple tuples in an efficient way.
1324 : * Currently, this handles inserting into a foreign table without
1325 : * a RETURNING clause.
1326 : * ----------------------------------------------------------------
1327 : */
1328 : static void
1329 56 : ExecBatchInsert(ModifyTableState *mtstate,
1330 : ResultRelInfo *resultRelInfo,
1331 : TupleTableSlot **slots,
1332 : TupleTableSlot **planSlots,
1333 : int numSlots,
1334 : EState *estate,
1335 : bool canSetTag)
1336 : {
1337 : int i;
1338 56 : int numInserted = numSlots;
1339 56 : TupleTableSlot *slot = NULL;
1340 : TupleTableSlot **rslots;
1341 :
1342 : /*
1343 : * insert into foreign table: let the FDW do it
1344 : */
1345 56 : rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1346 : resultRelInfo,
1347 : slots,
1348 : planSlots,
1349 : &numInserted);
1350 :
1351 344 : for (i = 0; i < numInserted; i++)
1352 : {
1353 288 : slot = rslots[i];
1354 :
1355 : /*
1356 : * AFTER ROW Triggers might reference the tableoid column, so
1357 : * (re-)initialize tts_tableOid before evaluating them.
1358 : */
1359 288 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1360 :
1361 : /* AFTER ROW INSERT Triggers */
1362 288 : ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1363 288 : mtstate->mt_transition_capture);
1364 :
1365 : /*
1366 : * Check any WITH CHECK OPTION constraints from parent views. See the
1367 : * comment in ExecInsert.
1368 : */
1369 288 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1370 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1371 : }
1372 :
1373 56 : if (canSetTag && numInserted > 0)
1374 56 : estate->es_processed += numInserted;
1375 :
1376 : /* Clean up all the slots, ready for the next batch */
1377 344 : for (i = 0; i < numSlots; i++)
1378 : {
1379 288 : ExecClearTuple(slots[i]);
1380 288 : ExecClearTuple(planSlots[i]);
1381 : }
1382 56 : resultRelInfo->ri_NumSlots = 0;
1383 56 : }
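/*
 * Editor's illustrative sketch: an FDW opts into the batching path above by
 * providing the GetForeignModifyBatchSize and ExecForeignBatchInsert
 * callbacks in its FdwRoutine.  The function names and bodies below are
 * hypothetical placeholders, not taken from any real FDW.
 */
static int
sketchGetForeignModifyBatchSize(ResultRelInfo *resultRelInfo)
{
	return 128;					/* e.g. derived from a table/server option */
}

static TupleTableSlot **
sketchExecForeignBatchInsert(EState *estate, ResultRelInfo *resultRelInfo,
							 TupleTableSlot **slots, TupleTableSlot **planSlots,
							 int *numSlots)
{
	/*
	 * Ship *numSlots rows to the remote side in one round trip; the callback
	 * may lower *numSlots if fewer rows were actually inserted.  The returned
	 * slots are what ExecBatchInsert() runs AFTER ROW triggers on.
	 */
	return slots;
}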
1384 :
1385 : /*
1386 : * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1387 : */
1388 : static void
1389 34 : ExecPendingInserts(EState *estate)
1390 : {
1391 : ListCell *l1,
1392 : *l2;
1393 :
1394 70 : forboth(l1, estate->es_insert_pending_result_relations,
1395 : l2, estate->es_insert_pending_modifytables)
1396 : {
1397 36 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1398 36 : ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1399 :
1400 : Assert(mtstate);
1401 36 : ExecBatchInsert(mtstate, resultRelInfo,
1402 : resultRelInfo->ri_Slots,
1403 : resultRelInfo->ri_PlanSlots,
1404 : resultRelInfo->ri_NumSlots,
1405 36 : estate, mtstate->canSetTag);
1406 : }
1407 :
1408 34 : list_free(estate->es_insert_pending_result_relations);
1409 34 : list_free(estate->es_insert_pending_modifytables);
1410 34 : estate->es_insert_pending_result_relations = NIL;
1411 34 : estate->es_insert_pending_modifytables = NIL;
1412 34 : }
1413 :
1414 : /*
1415 : * ExecDeletePrologue -- subroutine for ExecDelete
1416 : *
1417 : * Prepare executor state for DELETE. Actually, the only thing we have to do
1418 : * here is execute BEFORE ROW triggers. We return false if one of them makes
1419 : * the delete a no-op; otherwise, return true.
1420 : */
1421 : static bool
1422 1639950 : ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1423 : ItemPointer tupleid, HeapTuple oldtuple,
1424 : TupleTableSlot **epqreturnslot, TM_Result *result)
1425 : {
1426 1639950 : if (result)
1427 1412 : *result = TM_Ok;
1428 :
1429 : /* BEFORE ROW DELETE triggers */
1430 1639950 : if (resultRelInfo->ri_TrigDesc &&
1431 7126 : resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1432 : {
1433 : /* Flush any pending inserts, so rows are visible to the triggers */
1434 388 : if (context->estate->es_insert_pending_result_relations != NIL)
1435 2 : ExecPendingInserts(context->estate);
1436 :
1437 388 : return ExecBRDeleteTriggers(context->estate, context->epqstate,
1438 : resultRelInfo, tupleid, oldtuple,
1439 : epqreturnslot, result, &context->tmfd);
1440 : }
1441 :
1442 1639562 : return true;
1443 : }
1444 :
1445 : /*
1446 : * ExecDeleteAct -- subroutine for ExecDelete
1447 : *
1448 : * Actually delete the tuple from a plain table.
1449 : *
1450 : * Caller is in charge of doing EvalPlanQual as necessary
1451 : */
1452 : static TM_Result
1453 1639754 : ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1454 : ItemPointer tupleid, bool changingPart)
1455 : {
1456 1639754 : EState *estate = context->estate;
1457 :
1458 1639754 : return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1459 : estate->es_output_cid,
1460 : estate->es_snapshot,
1461 : estate->es_crosscheck_snapshot,
1462 : true /* wait for commit */ ,
1463 : &context->tmfd,
1464 : changingPart);
1465 : }
1466 :
1467 : /*
1468 : * ExecDeleteEpilogue -- subroutine for ExecDelete
1469 : *
1470 : * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1471 : * including the UPDATE triggers if the deletion is being done as part of a
1472 : * cross-partition tuple move.
1473 : */
1474 : static void
1475 1639702 : ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1476 : ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1477 : {
1478 1639702 : ModifyTableState *mtstate = context->mtstate;
1479 1639702 : EState *estate = context->estate;
1480 : TransitionCaptureState *ar_delete_trig_tcs;
1481 :
1482 : /*
1483 : * If this delete is the result of a partition key update that moved the
1484 : * tuple to a new partition, put this row into the transition OLD TABLE,
1485 : * if there is one. We need to do this separately for DELETE and INSERT
1486 : * because they happen on different tables.
1487 : */
1488 1639702 : ar_delete_trig_tcs = mtstate->mt_transition_capture;
1489 1639702 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1490 42 : mtstate->mt_transition_capture->tcs_update_old_table)
1491 : {
1492 42 : ExecARUpdateTriggers(estate, resultRelInfo,
1493 : NULL, NULL,
1494 : tupleid, oldtuple,
1495 42 : NULL, NULL, mtstate->mt_transition_capture,
1496 : false);
1497 :
1498 : /*
1499 : * We've already captured the OLD TABLE row, so make sure any AR
1500 : * DELETE trigger fired below doesn't capture it again.
1501 : */
1502 42 : ar_delete_trig_tcs = NULL;
1503 : }
1504 :
1505 : /* AFTER ROW DELETE Triggers */
1506 1639702 : ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1507 : ar_delete_trig_tcs, changingPart);
1508 1639702 : }
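/*
 * Editor's illustrative sketch (hypothetical helper): the prologue/act/
 * epilogue decomposition above is how ExecDelete() (and the MERGE code, which
 * calls the pieces directly) performs one delete.  A real caller must also
 * handle the non-TM_Ok outcomes (EvalPlanQual rechecks, self-modified tuples,
 * and so on), which this sketch simply gives up on.
 */
static bool
DeleteOneRowSketch(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
				   ItemPointer tupleid)
{
	if (!ExecDeletePrologue(context, resultRelInfo, tupleid, NULL, NULL, NULL))
		return false;			/* a BEFORE ROW trigger suppressed the delete */

	if (ExecDeleteAct(context, resultRelInfo, tupleid, false) != TM_Ok)
		return false;			/* concurrent update/delete; caller must decide */

	ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL, false);
	return true;
}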
1509 :
1510 : /* ----------------------------------------------------------------
1511 : * ExecDelete
1512 : *
1513 : * DELETE is like UPDATE, except that we delete the tuple and no
1514 : * index modifications are needed.
1515 : *
1516 : * When deleting from a table, tupleid identifies the tuple to delete and
1517 : * oldtuple is NULL. When deleting through a view INSTEAD OF trigger,
1518 : * oldtuple is passed to the triggers and identifies what to delete, and
1519 : * tupleid is invalid. When deleting from a foreign table, tupleid is
1520 : * invalid; the FDW has to figure out which row to delete using data from
1521 : * the planSlot. oldtuple is passed to foreign table triggers; it is
1522 : * NULL when the foreign table has no relevant triggers. We use
1523 : * tupleDeleted to indicate whether the tuple is actually deleted;
1524 : * callers can use it to decide whether to continue the operation. When
1525 : * this DELETE is part of an UPDATE of the partition key, then the slot
1526 : * returned by EvalPlanQual() is passed back using output parameter
1527 : * epqreturnslot.
1528 : *
1529 : * Returns RETURNING result if any, otherwise NULL.
1530 : * ----------------------------------------------------------------
1531 : */
1532 : static TupleTableSlot *
1533 1639562 : ExecDelete(ModifyTableContext *context,
1534 : ResultRelInfo *resultRelInfo,
1535 : ItemPointer tupleid,
1536 : HeapTuple oldtuple,
1537 : bool processReturning,
1538 : bool changingPart,
1539 : bool canSetTag,
1540 : TM_Result *tmresult,
1541 : bool *tupleDeleted,
1542 : TupleTableSlot **epqreturnslot)
1543 : {
1544 1639562 : EState *estate = context->estate;
1545 1639562 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1546 1639562 : TupleTableSlot *slot = NULL;
1547 : TM_Result result;
1548 : bool saveOld;
1549 :
1550 1639562 : if (tupleDeleted)
1551 1024 : *tupleDeleted = false;
1552 :
1553 : /*
1554 : * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1555 : * done if it says we are.
1556 : */
1557 1639562 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1558 : epqreturnslot, tmresult))
1559 52 : return NULL;
1560 :
1561 : /* INSTEAD OF ROW DELETE Triggers */
1562 1639476 : if (resultRelInfo->ri_TrigDesc &&
1563 6984 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
1564 48 : {
1565 : bool dodelete;
1566 :
1567 : Assert(oldtuple != NULL);
1568 54 : dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1569 :
1570 54 : if (!dodelete) /* "do nothing" */
1571 6 : return NULL;
1572 : }
1573 1639422 : else if (resultRelInfo->ri_FdwRoutine)
1574 : {
1575 : /*
1576 : * delete from foreign table: let the FDW do it
1577 : *
1578 : * We offer the returning slot as a place to store RETURNING data,
1579 : * although the FDW can return some other slot if it wants.
1580 : */
1581 42 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1582 42 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1583 : resultRelInfo,
1584 : slot,
1585 : context->planSlot);
1586 :
1587 42 : if (slot == NULL) /* "do nothing" */
1588 0 : return NULL;
1589 :
1590 : /*
1591 : * RETURNING expressions might reference the tableoid column, so
1592 : * (re)initialize tts_tableOid before evaluating them.
1593 : */
1594 42 : if (TTS_EMPTY(slot))
1595 6 : ExecStoreAllNullTuple(slot);
1596 :
1597 42 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1598 : }
1599 : else
1600 : {
1601 : /*
1602 : * delete the tuple
1603 : *
1604 : * Note: if context->estate->es_crosscheck_snapshot isn't
1605 : * InvalidSnapshot, we check that the row to be deleted is visible to
1606 : * that snapshot, and throw a can't-serialize error if not. This is a
1607 : * special-case behavior needed for referential integrity updates in
1608 : * transaction-snapshot mode transactions.
1609 : */
1610 1639380 : ldelete:
1611 1639384 : result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1612 :
1613 1639348 : if (tmresult)
1614 990 : *tmresult = result;
1615 :
1616 1639348 : switch (result)
1617 : {
1618 30 : case TM_SelfModified:
1619 :
1620 : /*
1621 : * The target tuple was already updated or deleted by the
1622 : * current command, or by a later command in the current
1623 : * transaction. The former case is possible in a join DELETE
1624 : * where multiple tuples join to the same target tuple. This
1625 : * is somewhat questionable, but Postgres has always allowed
1626 : * it: we just ignore additional deletion attempts.
1627 : *
1628 : * The latter case arises if the tuple is modified by a
1629 : * command in a BEFORE trigger, or perhaps by a command in a
1630 : * volatile function used in the query. In such situations we
1631 : * should not ignore the deletion, but it is equally unsafe to
1632 : * proceed. We don't want to discard the original DELETE
1633 : * while keeping the triggered actions based on its deletion;
1634 : * and it would be no better to allow the original DELETE
1635 : * while discarding updates that it triggered. The row update
1636 : * carries some information that might be important according
1637 : * to business rules; so throwing an error is the only safe
1638 : * course.
1639 : *
1640 : * If a trigger actually intends this type of interaction, it
1641 : * can re-execute the DELETE and then return NULL to cancel
1642 : * the outer delete.
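 : *
 : * As a purely illustrative example of the former case (table and
 : * column names here are hypothetical, not taken from this file):
 : *
 : *     DELETE FROM orders o USING order_lines l
 : *      WHERE o.id = l.order_id;
 : *
 : * if several order_lines rows join to the same orders row, the
 : * first join output deletes it and the later ones land here and
 : * are simply ignored.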
1643 : */
1644 30 : if (context->tmfd.cmax != estate->es_output_cid)
1645 6 : ereport(ERROR,
1646 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1647 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1648 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1649 :
1650 : /* Else, already deleted by self; nothing to do */
1651 24 : return NULL;
1652 :
1653 1639254 : case TM_Ok:
1654 1639254 : break;
1655 :
1656 58 : case TM_Updated:
1657 : {
1658 : TupleTableSlot *inputslot;
1659 : TupleTableSlot *epqslot;
1660 :
1661 58 : if (IsolationUsesXactSnapshot())
1662 2 : ereport(ERROR,
1663 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1664 : errmsg("could not serialize access due to concurrent update")));
1665 :
1666 : /*
1667 : * Already know that we're going to need to do EPQ, so
1668 : * fetch tuple directly into the right slot.
1669 : */
1670 56 : EvalPlanQualBegin(context->epqstate);
1671 56 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1672 : resultRelInfo->ri_RangeTableIndex);
1673 :
1674 56 : result = table_tuple_lock(resultRelationDesc, tupleid,
1675 : estate->es_snapshot,
1676 : inputslot, estate->es_output_cid,
1677 : LockTupleExclusive, LockWaitBlock,
1678 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1679 : &context->tmfd);
1680 :
1681 52 : switch (result)
1682 : {
1683 46 : case TM_Ok:
1684 : Assert(context->tmfd.traversed);
1685 46 : epqslot = EvalPlanQual(context->epqstate,
1686 : resultRelationDesc,
1687 : resultRelInfo->ri_RangeTableIndex,
1688 : inputslot);
1689 46 : if (TupIsNull(epqslot))
1690 : /* Tuple not passing quals anymore, exiting... */
1691 30 : return NULL;
1692 :
1693 : /*
1694 : * If requested, skip delete and pass back the
1695 : * updated row.
1696 : */
1697 16 : if (epqreturnslot)
1698 : {
1699 12 : *epqreturnslot = epqslot;
1700 12 : return NULL;
1701 : }
1702 : else
1703 4 : goto ldelete;
1704 :
1705 4 : case TM_SelfModified:
1706 :
1707 : /*
1708 : * This can be reached when following an update
1709 : * chain from a tuple updated by another session,
1710 : * reaching a tuple that was already updated in
1711 : * this transaction. If previously updated by this
1712 : * command, ignore the delete, otherwise error
1713 : * out.
1714 : *
1715 : * See also TM_SelfModified response to
1716 : * table_tuple_delete() above.
1717 : */
1718 4 : if (context->tmfd.cmax != estate->es_output_cid)
1719 2 : ereport(ERROR,
1720 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1721 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1722 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1723 2 : return NULL;
1724 :
1725 2 : case TM_Deleted:
1726 : /* tuple already deleted; nothing to do */
1727 2 : return NULL;
1728 :
1729 0 : default:
1730 :
1731 : /*
1732 : * TM_Invisible should be impossible because we're
1733 : * waiting for updated row versions, and would
1734 : * already have errored out if the first version
1735 : * is invisible.
1736 : *
1737 : * TM_Updated should be impossible, because we're
1738 : * locking the latest version via
1739 : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
1740 : */
1741 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
1742 : result);
1743 : return NULL;
1744 : }
1745 :
1746 : Assert(false);
1747 : break;
1748 : }
1749 :
1750 6 : case TM_Deleted:
1751 6 : if (IsolationUsesXactSnapshot())
1752 0 : ereport(ERROR,
1753 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1754 : errmsg("could not serialize access due to concurrent delete")));
1755 : /* tuple already deleted; nothing to do */
1756 6 : return NULL;
1757 :
1758 0 : default:
1759 0 : elog(ERROR, "unrecognized table_tuple_delete status: %u",
1760 : result);
1761 : return NULL;
1762 : }
1763 :
1764 : /*
1765 : * Note: Normally one would think that we have to delete index tuples
1766 : * associated with the heap tuple now...
1767 : *
1768 : * ... but in POSTGRES, we have no need to do this because VACUUM will
1769 : * take care of it later. We can't delete index tuples immediately
1770 : * anyway, since the tuple is still visible to other transactions.
1771 : */
1772 : }
1773 :
1774 1639344 : if (canSetTag)
1775 1638170 : (estate->es_processed)++;
1776 :
1777 : /* Tell caller that the delete actually happened. */
1778 1639344 : if (tupleDeleted)
1779 946 : *tupleDeleted = true;
1780 :
1781 1639344 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
1782 :
1783 : /*
1784 : * Process RETURNING if present and if requested.
1785 : *
1786 : * If this is part of a cross-partition UPDATE, and the RETURNING list
1787 : * refers to any OLD column values, save the old tuple here for later
1788 : * processing of the RETURNING list by ExecInsert().
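 : *
 : * (Hypothetical illustration, assuming the RETURNING old/new column
 : * syntax: an UPDATE on a partitioned table with RETURNING old.val,
 : * new.val that moves the row to another partition reaches this path;
 : * the tuple saved here supplies the old.* values later on.)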
1789 : */
1790 1639490 : saveOld = changingPart && resultRelInfo->ri_projectReturning &&
1791 146 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD;
1792 :
1793 1639344 : if (resultRelInfo->ri_projectReturning && (processReturning || saveOld))
1794 : {
1795 : /*
1796 : * We have to put the target tuple into a slot, which means first we
1797 : * gotta fetch it. We can use the trigger tuple slot.
1798 : */
1799 : TupleTableSlot *rslot;
1800 :
1801 984 : if (resultRelInfo->ri_FdwRoutine)
1802 : {
1803 : /* FDW must have provided a slot containing the deleted row */
1804 : Assert(!TupIsNull(slot));
1805 : }
1806 : else
1807 : {
1808 970 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1809 970 : if (oldtuple != NULL)
1810 : {
1811 24 : ExecForceStoreHeapTuple(oldtuple, slot, false);
1812 : }
1813 : else
1814 : {
1815 946 : if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
1816 : SnapshotAny, slot))
1817 0 : elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1818 : }
1819 : }
1820 :
1821 : /*
1822 : * If required, save the old tuple for later processing of the
1823 : * RETURNING list by ExecInsert().
1824 : */
1825 984 : if (saveOld)
1826 : {
1827 : TupleConversionMap *tupconv_map;
1828 :
1829 : /*
1830 : * Convert the tuple into the root partition's format/slot, if
1831 : * needed. ExecInsert() will then convert it to the new
1832 : * partition's format/slot, if necessary.
1833 : */
1834 44 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1835 44 : if (tupconv_map != NULL)
1836 : {
1837 18 : ResultRelInfo *rootRelInfo = context->mtstate->rootResultRelInfo;
1838 18 : TupleTableSlot *oldSlot = slot;
1839 :
1840 18 : slot = execute_attr_map_slot(tupconv_map->attrMap,
1841 : slot,
1842 : ExecGetReturningSlot(estate,
1843 : rootRelInfo));
1844 :
1845 18 : slot->tts_tableOid = oldSlot->tts_tableOid;
1846 18 : ItemPointerCopy(&oldSlot->tts_tid, &slot->tts_tid);
1847 : }
1848 :
1849 44 : context->cpDeletedSlot = slot;
1850 :
1851 44 : return NULL;
1852 : }
1853 :
1854 940 : rslot = ExecProcessReturning(context, resultRelInfo, CMD_DELETE,
1855 : slot, NULL, context->planSlot);
1856 :
1857 : /*
1858 : * Before releasing the target tuple again, make sure rslot has a
1859 : * local copy of any pass-by-reference values.
1860 : */
1861 940 : ExecMaterializeSlot(rslot);
1862 :
1863 940 : ExecClearTuple(slot);
1864 :
1865 940 : return rslot;
1866 : }
1867 :
1868 1638360 : return NULL;
1869 : }
1870 :
1871 : /*
1872 : * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
1873 : *
1874 : * This works by first deleting the old tuple from the current partition,
1875 : * followed by inserting the new tuple into the root parent table, that is,
1876 : * mtstate->rootResultRelInfo. It will be re-routed from there to the
1877 : * correct partition.
1878 : *
1879 : * Returns true if the tuple has been successfully moved, or if it's found
1880 : * that the tuple was concurrently deleted so there's nothing more to do
1881 : * for the caller.
1882 : *
1883 : * False is returned if the tuple we're trying to move is found to have been
1884 : * concurrently updated. In that case, the caller must check if the updated
1885 : * tuple that's returned in *retry_slot still needs to be re-routed, and call
1886 : * this function again or perform a regular update accordingly. For MERGE,
1887 : * the updated tuple is not returned in *retry_slot; it has its own retry
1888 : * logic.
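 : *
 : * Illustration only (hypothetical schema): for a table measurements
 : * partitioned BY RANGE (logdate), an
 : *
 : *     UPDATE measurements SET logdate = logdate + interval '1 month'
 : *
 : * can force rows into a different partition; each such row is handled
 : * here as a DELETE from its old partition plus an INSERT through the
 : * root, which re-routes it.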
1889 : */
1890 : static bool
1891 1072 : ExecCrossPartitionUpdate(ModifyTableContext *context,
1892 : ResultRelInfo *resultRelInfo,
1893 : ItemPointer tupleid, HeapTuple oldtuple,
1894 : TupleTableSlot *slot,
1895 : bool canSetTag,
1896 : UpdateContext *updateCxt,
1897 : TM_Result *tmresult,
1898 : TupleTableSlot **retry_slot,
1899 : TupleTableSlot **inserted_tuple,
1900 : ResultRelInfo **insert_destrel)
1901 : {
1902 1072 : ModifyTableState *mtstate = context->mtstate;
1903 1072 : EState *estate = mtstate->ps.state;
1904 : TupleConversionMap *tupconv_map;
1905 : bool tuple_deleted;
1906 1072 : TupleTableSlot *epqslot = NULL;
1907 :
1908 1072 : context->cpDeletedSlot = NULL;
1909 1072 : context->cpUpdateReturningSlot = NULL;
1910 1072 : *retry_slot = NULL;
1911 :
1912 : /*
1913 : * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
1914 : * to migrate to a different partition. Maybe this can be implemented
1915 : * some day, but it seems a fringe feature with little redeeming value.
1916 : */
1917 1072 : if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
1918 0 : ereport(ERROR,
1919 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1920 : errmsg("invalid ON UPDATE specification"),
1921 : errdetail("The result tuple would appear in a different partition than the original tuple.")));
1922 :
1923 : /*
1924 : * When an UPDATE is run directly on a leaf partition, simply fail with a
1925 : * partition constraint violation error.
1926 : */
1927 1072 : if (resultRelInfo == mtstate->rootResultRelInfo)
1928 48 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1929 :
1930 : /* Initialize tuple routing info if not already done. */
1931 1024 : if (mtstate->mt_partition_tuple_routing == NULL)
1932 : {
1933 644 : Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
1934 : MemoryContext oldcxt;
1935 :
1936 : /* Things built here have to last for the query duration. */
1937 644 : oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1938 :
1939 644 : mtstate->mt_partition_tuple_routing =
1940 644 : ExecSetupPartitionTupleRouting(estate, rootRel);
1941 :
1942 : /*
1943 : * Before a partition's tuple can be re-routed, it must first be
1944 : * converted to the root's format, so we'll need a slot for storing
1945 : * such tuples.
1946 : */
1947 : Assert(mtstate->mt_root_tuple_slot == NULL);
1948 644 : mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
1949 :
1950 644 : MemoryContextSwitchTo(oldcxt);
1951 : }
1952 :
1953 : /*
1954 : * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
1955 : * We want to return rows from INSERT.
1956 : */
1957 1024 : ExecDelete(context, resultRelInfo,
1958 : tupleid, oldtuple,
1959 : false, /* processReturning */
1960 : true, /* changingPart */
1961 : false, /* canSetTag */
1962 : tmresult, &tuple_deleted, &epqslot);
1963 :
1964 : /*
1965 : * For some reason if DELETE didn't happen (e.g. trigger prevented it, or
1966 : * it was already deleted by self, or it was concurrently deleted by
1967 : * another transaction), then we should skip the insert as well;
1968 : * otherwise, an UPDATE could cause an increase in the total number of
1969 : * rows across all partitions, which is clearly wrong.
1970 : *
1971 : * For a normal UPDATE, the case where the tuple has been the subject of a
1972 : * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
1973 : * machinery, but for an UPDATE that we've translated into a DELETE from
1974 : * this partition and an INSERT into some other partition, that's not
1975 : * available, because CTID chains can't span relation boundaries. We
1976 : * mimic the semantics to a limited extent by skipping the INSERT if the
1977 : * DELETE fails to find a tuple. This ensures that two concurrent
1978 : * attempts to UPDATE the same tuple at the same time can't turn one tuple
1979 : * into two, and that an UPDATE of a just-deleted tuple can't resurrect
1980 : * it.
1981 : */
1982 1022 : if (!tuple_deleted)
1983 : {
1984 : /*
1985 : * epqslot will be typically NULL. But when ExecDelete() finds that
1986 : * another transaction has concurrently updated the same row, it
1987 : * re-fetches the row, skips the delete, and epqslot is set to the
1988 : * re-fetched tuple slot. In that case, we need to do all the checks
1989 : * again. For MERGE, we leave everything to the caller (it must do
1990 : * additional rechecking, and might end up executing a different
1991 : * action entirely).
1992 : */
1993 76 : if (mtstate->operation == CMD_MERGE)
1994 34 : return *tmresult == TM_Ok;
1995 42 : else if (TupIsNull(epqslot))
1996 36 : return true;
1997 : else
1998 : {
1999 : /* Fetch the most recent version of old tuple. */
2000 : TupleTableSlot *oldSlot;
2001 :
2002 : /* ... but first, make sure ri_oldTupleSlot is initialized. */
2003 6 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2004 0 : ExecInitUpdateProjection(mtstate, resultRelInfo);
2005 6 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2006 6 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
2007 : tupleid,
2008 : SnapshotAny,
2009 : oldSlot))
2010 0 : elog(ERROR, "failed to fetch tuple being updated");
2011 : /* and project the new tuple to retry the UPDATE with */
2012 6 : *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
2013 : oldSlot);
2014 6 : return false;
2015 : }
2016 : }
2017 :
2018 : /*
2019 : * resultRelInfo is one of the per-relation resultRelInfos. So we should
2020 : * convert the tuple into root's tuple descriptor if needed, since
2021 : * ExecInsert() starts the search from root.
2022 : */
2023 946 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
2024 946 : if (tupconv_map != NULL)
2025 314 : slot = execute_attr_map_slot(tupconv_map->attrMap,
2026 : slot,
2027 : mtstate->mt_root_tuple_slot);
2028 :
2029 : /* Tuple routing starts from the root table. */
2030 818 : context->cpUpdateReturningSlot =
2031 946 : ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
2032 : inserted_tuple, insert_destrel);
2033 :
2034 : /*
2035 : * Reset the transition state that may possibly have been written by
2036 : * INSERT.
2037 : */
2038 818 : if (mtstate->mt_transition_capture)
2039 42 : mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
2040 :
2041 : /* We're done moving. */
2042 818 : return true;
2043 : }
2044 :
2045 : /*
2046 : * ExecUpdatePrologue -- subroutine for ExecUpdate
2047 : *
2048 : * Prepare executor state for UPDATE. This includes running BEFORE ROW
2049 : * triggers. We return false if one of them makes the update a no-op;
2050 : * otherwise, return true.
2051 : */
2052 : static bool
2053 316368 : ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2054 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2055 : TM_Result *result)
2056 : {
2057 316368 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2058 :
2059 316368 : if (result)
2060 2128 : *result = TM_Ok;
2061 :
2062 316368 : ExecMaterializeSlot(slot);
2063 :
2064 : /*
2065 : * Open the table's indexes, if we have not done so already, so that we
2066 : * can add new index entries for the updated tuple.
2067 : */
2068 316368 : if (resultRelationDesc->rd_rel->relhasindex &&
2069 225640 : resultRelInfo->ri_IndexRelationDescs == NULL)
2070 8306 : ExecOpenIndices(resultRelInfo, false);
2071 :
2072 : /* BEFORE ROW UPDATE triggers */
2073 316368 : if (resultRelInfo->ri_TrigDesc &&
2074 6304 : resultRelInfo->ri_TrigDesc->trig_update_before_row)
2075 : {
2076 : /* Flush any pending inserts, so rows are visible to the triggers */
2077 2572 : if (context->estate->es_insert_pending_result_relations != NIL)
2078 2 : ExecPendingInserts(context->estate);
2079 :
2080 2572 : return ExecBRUpdateTriggers(context->estate, context->epqstate,
2081 : resultRelInfo, tupleid, oldtuple, slot,
2082 : result, &context->tmfd);
2083 : }
2084 :
2085 313796 : return true;
2086 : }
2087 :
2088 : /*
2089 : * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
2090 : *
2091 : * Apply the final modifications to the tuple slot before the update.
2092 : * (This is split out because we also need it in the foreign-table code path.)
2093 : */
2094 : static void
2095 316082 : ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
2096 : TupleTableSlot *slot,
2097 : EState *estate)
2098 : {
2099 316082 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2100 :
2101 : /*
2102 : * Constraints and GENERATED expressions might reference the tableoid
2103 : * column, so (re-)initialize tts_tableOid before evaluating them.
2104 : */
2105 316082 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2106 :
2107 : /*
2108 : * Compute stored generated columns
2109 : */
2110 316082 : if (resultRelationDesc->rd_att->constr &&
2111 188730 : resultRelationDesc->rd_att->constr->has_generated_stored)
2112 258 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
2113 : CMD_UPDATE);
2114 316082 : }
2115 :
2116 : /*
2117 : * ExecUpdateAct -- subroutine for ExecUpdate
2118 : *
2119 : * Actually update the tuple, when operating on a plain table. If the
2120 : * table is a partition, and the command was called referencing an ancestor
2121 : * partitioned table, this routine migrates the resulting tuple to another
2122 : * partition.
2123 : *
2124 : * The caller is in charge of keeping indexes current as necessary. The
2125 : * caller is also in charge of doing EvalPlanQual if the tuple is found to
2126 : * be concurrently updated. However, in case of a cross-partition update,
2127 : * this routine does it.
2128 : */
2129 : static TM_Result
2130 315890 : ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2131 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2132 : bool canSetTag, UpdateContext *updateCxt)
2133 : {
2134 315890 : EState *estate = context->estate;
2135 315890 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2136 : bool partition_constraint_failed;
2137 : TM_Result result;
2138 :
2139 315890 : updateCxt->crossPartUpdate = false;
2140 :
2141 : /*
2142 : * If we move the tuple to a new partition, we loop back here to recompute
2143 : * GENERATED values (which are allowed to be different across partitions)
2144 : * and recheck any RLS policies and constraints. We do not fire any
2145 : * BEFORE triggers of the new partition, however.
2146 : */
2147 315896 : lreplace:
2148 : /* Fill in GENERATEd columns */
2149 315896 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2150 :
2151 : /* ensure slot is independent, consider e.g. EPQ */
2152 315896 : ExecMaterializeSlot(slot);
2153 :
2154 : /*
2155 : * If partition constraint fails, this row might get moved to another
2156 : * partition, in which case we should check the RLS CHECK policy just
2157 : * before inserting into the new partition, rather than doing it here.
2158 : * This is because a trigger on that partition might again change the row.
2159 : * So skip the WCO checks if the partition constraint fails.
2160 : */
2161 315896 : partition_constraint_failed =
2162 318616 : resultRelationDesc->rd_rel->relispartition &&
2163 2720 : !ExecPartitionCheck(resultRelInfo, slot, estate, false);
2164 :
2165 : /* Check any RLS UPDATE WITH CHECK policies */
2166 315896 : if (!partition_constraint_failed &&
2167 314824 : resultRelInfo->ri_WithCheckOptions != NIL)
2168 : {
2169 : /*
2170 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
2171 : * we are looking for at this point.
2172 : */
2173 480 : ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
2174 : resultRelInfo, slot, estate);
2175 : }
2176 :
2177 : /*
2178 : * If a partition check failed, try to move the row into the right
2179 : * partition.
2180 : */
2181 315842 : if (partition_constraint_failed)
2182 : {
2183 : TupleTableSlot *inserted_tuple,
2184 : *retry_slot;
2185 1072 : ResultRelInfo *insert_destrel = NULL;
2186 :
2187 : /*
2188 : * ExecCrossPartitionUpdate will first DELETE the row from the
2189 : * partition it's currently in and then insert it back into the root
2190 : * table, which will re-route it to the correct partition. However,
2191 : * if the tuple has been concurrently updated, a retry is needed.
2192 : */
2193 1072 : if (ExecCrossPartitionUpdate(context, resultRelInfo,
2194 : tupleid, oldtuple, slot,
2195 : canSetTag, updateCxt,
2196 : &result,
2197 : &retry_slot,
2198 : &inserted_tuple,
2199 : &insert_destrel))
2200 : {
2201 : /* success! */
2202 878 : updateCxt->crossPartUpdate = true;
2203 :
2204 : /*
2205 : * If the partitioned table being updated is referenced in foreign
2206 : * keys, queue up trigger events to check that none of them were
2207 : * violated. No special treatment is needed in
2208 : * non-cross-partition update situations, because the leaf
2209 : * partition's AR update triggers will take care of that. During
2210 : * cross-partition updates implemented as delete on the source
2211 : * partition followed by insert on the destination partition,
2212 : * AR-UPDATE triggers of the root table (that is, the table
2213 : * mentioned in the query) must be fired.
2214 : *
2215 : * NULL insert_destrel means that the move failed to occur, that
2216 : * is, the update failed, so there is no need to do anything in that case.
2217 : */
2218 878 : if (insert_destrel &&
2219 790 : resultRelInfo->ri_TrigDesc &&
2220 362 : resultRelInfo->ri_TrigDesc->trig_update_after_row)
2221 300 : ExecCrossPartitionUpdateForeignKey(context,
2222 : resultRelInfo,
2223 : insert_destrel,
2224 : tupleid, slot,
2225 : inserted_tuple);
2226 :
2227 882 : return TM_Ok;
2228 : }
2229 :
2230 : /*
2231 : * No luck, a retry is needed. If running MERGE, we do not do so
2232 : * here; instead let it handle that on its own rules.
2233 : */
2234 16 : if (context->mtstate->operation == CMD_MERGE)
2235 10 : return result;
2236 :
2237 : /*
2238 : * ExecCrossPartitionUpdate installed an updated version of the new
2239 : * tuple in the retry slot; start over.
2240 : */
2241 6 : slot = retry_slot;
2242 6 : goto lreplace;
2243 : }
2244 :
2245 : /*
2246 : * Check the constraints of the tuple. We've already checked the
2247 : * partition constraint above; however, we must still ensure the tuple
2248 : * passes all other constraints, so we will call ExecConstraints() and
2249 : * have it validate all remaining checks.
2250 : */
2251 314770 : if (resultRelationDesc->rd_att->constr)
2252 188112 : ExecConstraints(resultRelInfo, slot, estate);
2253 :
2254 : /*
2255 : * replace the heap tuple
2256 : *
2257 : * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
2258 : * the row to be updated is visible to that snapshot, and throw a
2259 : * can't-serialize error if not. This is a special-case behavior needed
2260 : * for referential integrity updates in transaction-snapshot mode
2261 : * transactions.
2262 : */
2263 314696 : result = table_tuple_update(resultRelationDesc, tupleid, slot,
2264 : estate->es_output_cid,
2265 : estate->es_snapshot,
2266 : estate->es_crosscheck_snapshot,
2267 : true /* wait for commit */ ,
2268 : &context->tmfd, &updateCxt->lockmode,
2269 : &updateCxt->updateIndexes);
2270 :
2271 314672 : return result;
2272 : }
2273 :
2274 : /*
2275 : * ExecUpdateEpilogue -- subroutine for ExecUpdate
2276 : *
2277 : * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2278 : * returns indicating that the tuple was updated.
2279 : */
2280 : static void
2281 314720 : ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
2282 : ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2283 : HeapTuple oldtuple, TupleTableSlot *slot)
2284 : {
2285 314720 : ModifyTableState *mtstate = context->mtstate;
2286 314720 : List *recheckIndexes = NIL;
2287 :
2288 : /* insert index entries for tuple if necessary */
2289 314720 : if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
2290 170856 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
2291 : slot, context->estate,
2292 : true, false,
2293 : NULL, NIL,
2294 170856 : (updateCxt->updateIndexes == TU_Summarizing));
2295 :
2296 : /* AFTER ROW UPDATE Triggers */
2297 314630 : ExecARUpdateTriggers(context->estate, resultRelInfo,
2298 : NULL, NULL,
2299 : tupleid, oldtuple, slot,
2300 : recheckIndexes,
2301 314630 : mtstate->operation == CMD_INSERT ?
2302 : mtstate->mt_oc_transition_capture :
2303 : mtstate->mt_transition_capture,
2304 : false);
2305 :
2306 314630 : list_free(recheckIndexes);
2307 :
2308 : /*
2309 : * Check any WITH CHECK OPTION constraints from parent views. We are
2310 : * required to do this after testing all constraints and uniqueness
2311 : * violations per the SQL spec, so we do it after actually updating the
2312 : * record in the heap and all indexes.
2313 : *
2314 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2315 : * are looking for at this point.
2316 : */
2317 314630 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2318 454 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2319 : slot, context->estate);
2320 314554 : }
2321 :
2322 : /*
2323 : * Queues up an update event using the target root partitioned table's
2324 : * trigger to check that a cross-partition update hasn't broken any foreign
2325 : * keys pointing into it.
2326 : */
2327 : static void
2328 300 : ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
2329 : ResultRelInfo *sourcePartInfo,
2330 : ResultRelInfo *destPartInfo,
2331 : ItemPointer tupleid,
2332 : TupleTableSlot *oldslot,
2333 : TupleTableSlot *newslot)
2334 : {
2335 : ListCell *lc;
2336 : ResultRelInfo *rootRelInfo;
2337 : List *ancestorRels;
2338 :
2339 300 : rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2340 300 : ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2341 :
2342 : /*
2343 : * For any foreign keys that point directly into a non-root ancestor of
2344 : * the source partition, we can in theory fire an update event to enforce
2345 : * those constraints using their triggers, if we could tell that both the
2346 : * source and the destination partitions are under the same ancestor. But
2347 : * for now, we simply report an error that those cannot be enforced.
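 : *
 : * Sketch of the error case (hypothetical names): with root table p, an
 : * intermediate partitioned child p_1, and some other table declaring
 : *
 : *     FOREIGN KEY (x) REFERENCES p_1 (x)
 : *
 : * any cross-partition move out of a partition below p_1 is rejected
 : * here, even if the destination is also below p_1.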
2348 : */
2349 654 : foreach(lc, ancestorRels)
2350 : {
2351 360 : ResultRelInfo *rInfo = lfirst(lc);
2352 360 : TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2353 360 : bool has_noncloned_fkey = false;
2354 :
2355 : /* Root ancestor's triggers will be processed. */
2356 360 : if (rInfo == rootRelInfo)
2357 294 : continue;
2358 :
2359 66 : if (trigdesc && trigdesc->trig_update_after_row)
2360 : {
2361 228 : for (int i = 0; i < trigdesc->numtriggers; i++)
2362 : {
2363 168 : Trigger *trig = &trigdesc->triggers[i];
2364 :
2365 174 : if (!trig->tgisclone &&
2366 6 : RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
2367 : {
2368 6 : has_noncloned_fkey = true;
2369 6 : break;
2370 : }
2371 : }
2372 : }
2373 :
2374 66 : if (has_noncloned_fkey)
2375 6 : ereport(ERROR,
2376 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2377 : errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2378 : errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2379 : RelationGetRelationName(rInfo->ri_RelationDesc),
2380 : RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
2381 : errhint("Consider defining the foreign key on table \"%s\".",
2382 : RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
2383 : }
2384 :
2385 : /* Perform the root table's triggers. */
2386 294 : ExecARUpdateTriggers(context->estate,
2387 : rootRelInfo, sourcePartInfo, destPartInfo,
2388 : tupleid, NULL, newslot, NIL, NULL, true);
2389 294 : }
2390 :
2391 : /* ----------------------------------------------------------------
2392 : * ExecUpdate
2393 : *
2394 : * note: we can't run UPDATE queries with transactions
2395 : * off because UPDATEs are actually INSERTs and our
2396 : * scan will mistakenly loop forever, updating the tuple
2397 : * it just inserted. This should be fixed, but until it
2398 : * is, we don't want to get stuck in an infinite loop
2399 : * which corrupts your database.
2400 : *
2401 : * When updating a table, tupleid identifies the tuple to update and
2402 : * oldtuple is NULL. When updating through a view INSTEAD OF trigger,
2403 : * oldtuple is passed to the triggers and identifies what to update, and
2404 : * tupleid is invalid. When updating a foreign table, tupleid is
2405 : * invalid; the FDW has to figure out which row to update using data from
2406 : * the planSlot. oldtuple is passed to foreign table triggers; it is
2407 : * NULL when the foreign table has no relevant triggers.
2408 : *
2409 : * oldSlot contains the old tuple value.
2410 : * slot contains the new tuple value to be stored.
2411 : * planSlot is the output of the ModifyTable's subplan; we use it
2412 : * to access values from other input tables (for RETURNING),
2413 : * row-ID junk columns, etc.
2414 : *
2415 : * Returns RETURNING result if any, otherwise NULL. On exit, if tupleid
2416 : * had identified the tuple to update, it will identify the tuple
2417 : * actually updated after EvalPlanQual.
2418 : * ----------------------------------------------------------------
2419 : */
2420 : static TupleTableSlot *
2421 314240 : ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2422 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *oldSlot,
2423 : TupleTableSlot *slot, bool canSetTag)
2424 : {
2425 314240 : EState *estate = context->estate;
2426 314240 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2427 314240 : UpdateContext updateCxt = {0};
2428 : TM_Result result;
2429 :
2430 : /*
2431 : * abort the operation if not running transactions
2432 : */
2433 314240 : if (IsBootstrapProcessingMode())
2434 0 : elog(ERROR, "cannot UPDATE during bootstrap");
2435 :
2436 : /*
2437 : * Prepare for the update. This includes BEFORE ROW triggers, so we're
2438 : * done if it says we are.
2439 : */
2440 314240 : if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
2441 138 : return NULL;
2442 :
2443 : /* INSTEAD OF ROW UPDATE Triggers */
2444 314066 : if (resultRelInfo->ri_TrigDesc &&
2445 5778 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2446 : {
2447 126 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2448 : oldtuple, slot))
2449 18 : return NULL; /* "do nothing" */
2450 : }
2451 313940 : else if (resultRelInfo->ri_FdwRoutine)
2452 : {
2453 : /* Fill in GENERATEd columns */
2454 186 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2455 :
2456 : /*
2457 : * update in foreign table: let the FDW do it
2458 : */
2459 186 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2460 : resultRelInfo,
2461 : slot,
2462 : context->planSlot);
2463 :
2464 186 : if (slot == NULL) /* "do nothing" */
2465 2 : return NULL;
2466 :
2467 : /*
2468 : * AFTER ROW Triggers or RETURNING expressions might reference the
2469 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
2470 : * them. (This covers the case where the FDW replaced the slot.)
2471 : */
2472 184 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2473 : }
2474 : else
2475 : {
2476 : ItemPointerData lockedtid;
2477 :
2478 : /*
2479 : * If we generate a new candidate tuple after EvalPlanQual testing, we
2480 : * must loop back here to try again. (We don't need to redo triggers,
2481 : * however. If there are any BEFORE triggers then trigger.c will have
2482 : * done table_tuple_lock to lock the correct tuple, so there's no need
2483 : * to do them again.)
2484 : */
2485 313754 : redo_act:
2486 313858 : lockedtid = *tupleid;
2487 313858 : result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2488 : canSetTag, &updateCxt);
2489 :
2490 : /*
2491 : * If ExecUpdateAct reports that a cross-partition update was done,
2492 : * then the RETURNING tuple (if any) has been projected and there's
2493 : * nothing else for us to do.
2494 : */
2495 313542 : if (updateCxt.crossPartUpdate)
2496 866 : return context->cpUpdateReturningSlot;
2497 :
2498 312804 : switch (result)
2499 : {
2500 84 : case TM_SelfModified:
2501 :
2502 : /*
2503 : * The target tuple was already updated or deleted by the
2504 : * current command, or by a later command in the current
2505 : * transaction. The former case is possible in a join UPDATE
2506 : * where multiple tuples join to the same target tuple. This
2507 : * is pretty questionable, but Postgres has always allowed it:
2508 : * we just execute the first update action and ignore
2509 : * additional update attempts.
2510 : *
2511 : * The latter case arises if the tuple is modified by a
2512 : * command in a BEFORE trigger, or perhaps by a command in a
2513 : * volatile function used in the query. In such situations we
2514 : * should not ignore the update, but it is equally unsafe to
2515 : * proceed. We don't want to discard the original UPDATE
2516 : * while keeping the triggered actions based on it; and we
2517 : * have no principled way to merge this update with the
2518 : * previous ones. So throwing an error is the only safe
2519 : * course.
2520 : *
2521 : * If a trigger actually intends this type of interaction, it
2522 : * can re-execute the UPDATE (assuming it can figure out how)
2523 : * and then return NULL to cancel the outer update.
2524 : */
2525 84 : if (context->tmfd.cmax != estate->es_output_cid)
2526 6 : ereport(ERROR,
2527 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2528 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2529 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2530 :
2531 : /* Else, already updated by self; nothing to do */
2532 78 : return NULL;
2533 :
2534 312556 : case TM_Ok:
2535 312556 : break;
2536 :
2537 156 : case TM_Updated:
2538 : {
2539 : TupleTableSlot *inputslot;
2540 : TupleTableSlot *epqslot;
2541 :
2542 156 : if (IsolationUsesXactSnapshot())
2543 4 : ereport(ERROR,
2544 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2545 : errmsg("could not serialize access due to concurrent update")));
2546 :
2547 : /*
2548 : * Already know that we're going to need to do EPQ, so
2549 : * fetch tuple directly into the right slot.
2550 : */
2551 152 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2552 : resultRelInfo->ri_RangeTableIndex);
2553 :
2554 152 : result = table_tuple_lock(resultRelationDesc, tupleid,
2555 : estate->es_snapshot,
2556 : inputslot, estate->es_output_cid,
2557 : updateCxt.lockmode, LockWaitBlock,
2558 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2559 : &context->tmfd);
2560 :
2561 148 : switch (result)
2562 : {
2563 138 : case TM_Ok:
2564 : Assert(context->tmfd.traversed);
2565 :
2566 138 : epqslot = EvalPlanQual(context->epqstate,
2567 : resultRelationDesc,
2568 : resultRelInfo->ri_RangeTableIndex,
2569 : inputslot);
2570 138 : if (TupIsNull(epqslot))
2571 : /* Tuple not passing quals anymore, exiting... */
2572 34 : return NULL;
2573 :
2574 : /* Make sure ri_oldTupleSlot is initialized. */
2575 104 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2576 0 : ExecInitUpdateProjection(context->mtstate,
2577 : resultRelInfo);
2578 :
2579 104 : if (resultRelInfo->ri_needLockTagTuple)
2580 : {
2581 2 : UnlockTuple(resultRelationDesc,
2582 : &lockedtid, InplaceUpdateTupleLock);
2583 2 : LockTuple(resultRelationDesc,
2584 : tupleid, InplaceUpdateTupleLock);
2585 : }
2586 :
2587 : /* Fetch the most recent version of old tuple. */
2588 104 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2589 104 : if (!table_tuple_fetch_row_version(resultRelationDesc,
2590 : tupleid,
2591 : SnapshotAny,
2592 : oldSlot))
2593 0 : elog(ERROR, "failed to fetch tuple being updated");
2594 104 : slot = ExecGetUpdateNewTuple(resultRelInfo,
2595 : epqslot, oldSlot);
2596 104 : goto redo_act;
2597 :
2598 2 : case TM_Deleted:
2599 : /* tuple already deleted; nothing to do */
2600 2 : return NULL;
2601 :
2602 8 : case TM_SelfModified:
2603 :
2604 : /*
2605 : * This can be reached when following an update
2606 : * chain from a tuple updated by another session,
2607 : * reaching a tuple that was already updated in
2608 : * this transaction. If previously modified by
2609 : * this command, ignore the redundant update;
2610 : * otherwise error out.
2611 : *
2612 : * See also TM_SelfModified response to
2613 : * table_tuple_update() above.
2614 : */
2615 8 : if (context->tmfd.cmax != estate->es_output_cid)
2616 2 : ereport(ERROR,
2617 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2618 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2619 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2620 6 : return NULL;
2621 :
2622 0 : default:
2623 : /* see table_tuple_lock call in ExecDelete() */
2624 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2625 : result);
2626 : return NULL;
2627 : }
2628 : }
2629 :
2630 : break;
2631 :
2632 8 : case TM_Deleted:
2633 8 : if (IsolationUsesXactSnapshot())
2634 0 : ereport(ERROR,
2635 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2636 : errmsg("could not serialize access due to concurrent delete")));
2637 : /* tuple already deleted; nothing to do */
2638 8 : return NULL;
2639 :
2640 0 : default:
2641 0 : elog(ERROR, "unrecognized table_tuple_update status: %u",
2642 : result);
2643 : return NULL;
2644 : }
2645 : }
2646 :
2647 312836 : if (canSetTag)
2648 312238 : (estate->es_processed)++;
2649 :
2650 312836 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2651 : slot);
2652 :
2653 : /* Process RETURNING if present */
2654 312682 : if (resultRelInfo->ri_projectReturning)
2655 2322 : return ExecProcessReturning(context, resultRelInfo, CMD_UPDATE,
2656 : oldSlot, slot, context->planSlot);
2657 :
2658 310360 : return NULL;
2659 : }
2660 :
2661 : /*
2662 : * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
2663 : *
2664 : * Try to lock tuple for update as part of speculative insertion. If
2665 : * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
2666 : * (but still lock row, even though it may not satisfy estate's
2667 : * snapshot).
2668 : *
2669 : * Returns true if we're done (with or without an update), or false if
2670 : * the caller must retry the INSERT from scratch.
2671 : */
2672 : static bool
2673 5206 : ExecOnConflictUpdate(ModifyTableContext *context,
2674 : ResultRelInfo *resultRelInfo,
2675 : ItemPointer conflictTid,
2676 : TupleTableSlot *excludedSlot,
2677 : bool canSetTag,
2678 : TupleTableSlot **returning)
2679 : {
2680 5206 : ModifyTableState *mtstate = context->mtstate;
2681 5206 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2682 5206 : Relation relation = resultRelInfo->ri_RelationDesc;
2683 5206 : ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
2684 5206 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
2685 : TM_FailureData tmfd;
2686 : LockTupleMode lockmode;
2687 : TM_Result test;
2688 : Datum xminDatum;
2689 : TransactionId xmin;
2690 : bool isnull;
2691 :
2692 : /*
2693 : * Parse analysis should have blocked ON CONFLICT for all system
2694 : * relations, which includes these. There's no fundamental obstacle to
2695 : * supporting this; we'd just need to handle LOCKTAG_TUPLE like the other
2696 : * ExecUpdate() caller.
2697 : */
2698 : Assert(!resultRelInfo->ri_needLockTagTuple);
2699 :
2700 : /* Determine lock mode to use */
2701 5206 : lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
2702 :
2703 : /*
2704 : * Lock tuple for update. Don't follow updates when tuple cannot be
2705 : * locked without doing so. A row locking conflict here means our
2706 : * previous conclusion that the tuple is conclusively committed is not
2707 : * true anymore.
2708 : */
2709 5206 : test = table_tuple_lock(relation, conflictTid,
2710 5206 : context->estate->es_snapshot,
2711 5206 : existing, context->estate->es_output_cid,
2712 : lockmode, LockWaitBlock, 0,
2713 : &tmfd);
2714 5206 : switch (test)
2715 : {
2716 5182 : case TM_Ok:
2717 : /* success! */
2718 5182 : break;
2719 :
2720 24 : case TM_Invisible:
2721 :
2722 : /*
2723 : * This can occur when a just inserted tuple is updated again in
2724 : * the same command. E.g. because multiple rows with the same
2725 : * conflicting key values are inserted.
2726 : *
2727 : * This is somewhat similar to the ExecUpdate() TM_SelfModified
2728 : * case. We do not want to proceed because it would lead to the
2729 : * same row being updated a second time in some unspecified order,
2730 : * and in contrast to plain UPDATEs there's no historical behavior
2731 : * to break.
2732 : *
2733 : * It is the user's responsibility to prevent this situation from
2734 : * occurring. These problems are why the SQL standard similarly
2735 : * specifies that for SQL MERGE, an exception must be raised in
2736 : * the event of an attempt to update the same row twice.
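 : *
 : * A hypothetical statement that raises this error (table t with a
 : * unique column id; names invented for illustration):
 : *
 : *     INSERT INTO t (id, val) VALUES (1, 'a'), (1, 'b')
 : *     ON CONFLICT (id) DO UPDATE SET val = EXCLUDED.val;
 : *
 : * The second row conflicts with the first, just-inserted one and
 : * would update it a second time within the same command.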
2737 : */
2738 24 : xminDatum = slot_getsysattr(existing,
2739 : MinTransactionIdAttributeNumber,
2740 : &isnull);
2741 : Assert(!isnull);
2742 24 : xmin = DatumGetTransactionId(xminDatum);
2743 :
2744 24 : if (TransactionIdIsCurrentTransactionId(xmin))
2745 24 : ereport(ERROR,
2746 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2747 : /* translator: %s is a SQL command name */
2748 : errmsg("%s command cannot affect row a second time",
2749 : "ON CONFLICT DO UPDATE"),
2750 : errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
2751 :
2752 : /* This shouldn't happen */
2753 0 : elog(ERROR, "attempted to lock invisible tuple");
2754 : break;
2755 :
2756 0 : case TM_SelfModified:
2757 :
2758 : /*
2759 : * This state should never be reached. As a dirty snapshot is used
2760 : * to find conflicting tuples, speculative insertion wouldn't have
2761 : * seen this row to conflict with.
2762 : */
2763 0 : elog(ERROR, "unexpected self-updated tuple");
2764 : break;
2765 :
2766 0 : case TM_Updated:
2767 0 : if (IsolationUsesXactSnapshot())
2768 0 : ereport(ERROR,
2769 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2770 : errmsg("could not serialize access due to concurrent update")));
2771 :
2772 : /*
2773 : * As long as we don't support an UPDATE of INSERT ON CONFLICT for
2774 : * a partitioned table, we shouldn't reach a case where the tuple to
2775 : * be locked has been moved to another partition due to a concurrent
2776 : * update of the partition key.
2777 : */
2778 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2779 :
2780 : /*
2781 : * Tell caller to try again from the very start.
2782 : *
2783 : * It does not make sense to use the usual EvalPlanQual() style
2784 : * loop here, as the new version of the row might not conflict
2785 : * anymore, or the conflicting tuple has actually been deleted.
2786 : */
2787 0 : ExecClearTuple(existing);
2788 0 : return false;
2789 :
2790 0 : case TM_Deleted:
2791 0 : if (IsolationUsesXactSnapshot())
2792 0 : ereport(ERROR,
2793 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2794 : errmsg("could not serialize access due to concurrent delete")));
2795 :
2796 : /* see TM_Updated case */
2797 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2798 0 : ExecClearTuple(existing);
2799 0 : return false;
2800 :
2801 0 : default:
2802 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
2803 : }
2804 :
2805 : /* Success, the tuple is locked. */
2806 :
2807 : /*
2808 : * Verify that the tuple is visible to our MVCC snapshot if the current
2809 : * isolation level mandates that.
2810 : *
2811 : * It's not sufficient to rely on the check within ExecUpdate() as e.g.
2812 : * CONFLICT ... WHERE clause may prevent us from reaching that.
2813 : *
2814 : * This means we only ever continue when a new command in the current
2815 : * transaction could see the row, even though in READ COMMITTED mode the
2816 : * tuple will not be visible according to the current statement's
2817 : * snapshot. This is in line with the way UPDATE deals with newer tuple
2818 : * versions.
2819 : */
2820 5182 : ExecCheckTupleVisible(context->estate, relation, existing);
2821 :
2822 : /*
2823 : * Make tuple and any needed join variables available to ExecQual and
2824 : * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
2825 : * the target's existing tuple is installed in the scantuple. EXCLUDED
2826 : * has been made to reference INNER_VAR in setrefs.c, but there is no
2827 : * other redirection.
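 : *
 : * For instance, in the hypothetical statement
 : *
 : *     INSERT INTO t (id, n) VALUES (1, 1)
 : *     ON CONFLICT (id) DO UPDATE SET n = t.n + EXCLUDED.n
 : *     WHERE t.n < 100;
 : *
 : * "t.n" is read from the scantuple (the existing row) and
 : * "EXCLUDED.n" from ecxt_innertuple (the row proposed for insertion).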
2828 : */
2829 5182 : econtext->ecxt_scantuple = existing;
2830 5182 : econtext->ecxt_innertuple = excludedSlot;
2831 5182 : econtext->ecxt_outertuple = NULL;
2832 :
2833 5182 : if (!ExecQual(onConflictSetWhere, econtext))
2834 : {
2835 32 : ExecClearTuple(existing); /* see return below */
2836 32 : InstrCountFiltered1(&mtstate->ps, 1);
2837 32 : return true; /* done with the tuple */
2838 : }
2839 :
2840 5150 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2841 : {
2842 : /*
2843 : * Check target's existing tuple against UPDATE-applicable USING
2844 : * security barrier quals (if any), enforced here as RLS checks/WCOs.
2845 : *
2846 : * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
2847 : * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
2848 : * but that's almost the extent of its special handling for ON
2849 : * CONFLICT DO UPDATE.
2850 : *
2851 : * The rewriter will also have associated UPDATE applicable straight
2852 : * RLS checks/WCOs for the benefit of the ExecUpdate() call that
2853 : * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
2854 : * kinds, so there is no danger of spurious over-enforcement in the
2855 : * INSERT or UPDATE path.
2856 : */
2857 60 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
2858 : existing,
2859 : mtstate->ps.state);
2860 : }
2861 :
2862 : /* Project the new tuple version */
2863 5126 : ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
2864 :
2865 : /*
2866 : * Note that it is possible that the target tuple has been modified in
2867 : * this session, after the above table_tuple_lock. We choose to not error
2868 : * out in that case, in line with ExecUpdate's treatment of similar cases.
2869 : * This can happen if an UPDATE is triggered from within ExecQual(),
2870 : * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
2871 : * wCTE in the ON CONFLICT's SET.
2872 : */
2873 :
2874 : /* Execute UPDATE with projection */
2875 10222 : *returning = ExecUpdate(context, resultRelInfo,
2876 : conflictTid, NULL, existing,
2877 5126 : resultRelInfo->ri_onConflict->oc_ProjSlot,
2878 : canSetTag);
2879 :
2880 : /*
2881 : * Clear out existing tuple, as there might not be another conflict among
2882 : * the next input rows. Don't want to hold resources till the end of the
2883 : * query. First though, make sure that the returning slot, if any, has a
2884 : * local copy of any OLD pass-by-reference values, if it refers to any OLD
2885 : * columns.
2886 : */
2887 5096 : if (*returning != NULL &&
2888 222 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
2889 6 : ExecMaterializeSlot(*returning);
2890 :
2891 5096 : ExecClearTuple(existing);
2892 :
2893 5096 : return true;
2894 : }
2895 :
2896 : /*
2897 : * Perform MERGE.
2898 : */
2899 : static TupleTableSlot *
2900 14256 : ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2901 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
2902 : {
2903 14256 : TupleTableSlot *rslot = NULL;
2904 : bool matched;
2905 :
2906 : /*-----
2907 : * If we are dealing with a WHEN MATCHED case, tupleid or oldtuple is
2908 : * valid, depending on whether the result relation is a table or a view.
2909 : * We execute the first action for which the additional WHEN MATCHED AND
2910 : * quals pass. If an action without quals is found, that action is
2911 : * executed.
2912 : *
2913 : * Similarly, in the WHEN NOT MATCHED BY SOURCE case, tupleid or oldtuple
2914 : * is valid, and we look at the given WHEN NOT MATCHED BY SOURCE actions
2915 : * in sequence until one passes. This is almost identical to the WHEN
2916 : * MATCHED case, and both cases are handled by ExecMergeMatched().
2917 : *
2918 : * Finally, in the WHEN NOT MATCHED [BY TARGET] case, both tupleid and
2919 : * oldtuple are invalid, and we look at the given WHEN NOT MATCHED [BY
2920 : * TARGET] actions in sequence until one passes.
2921 : *
2922 : * Things get interesting in case of concurrent update/delete of the
2923 : * target tuple. Such concurrent update/delete is detected while we are
2924 : * executing a WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action.
2925 : *
2926 : * A concurrent update can:
2927 : *
2928 : * 1. modify the target tuple so that the results from checking any
2929 : * additional quals attached to WHEN MATCHED or WHEN NOT MATCHED BY
2930 : * SOURCE actions potentially change, but the result from the join
2931 : * quals does not change.
2932 : *
2933 : * In this case, we are still dealing with the same kind of match
2934 : * (MATCHED or NOT MATCHED BY SOURCE). We recheck the same list of
2935 : * actions from the start and choose the first one that satisfies the
2936 : * new target tuple.
2937 : *
2938 : * 2. modify the target tuple in the WHEN MATCHED case so that the join
2939 : * quals no longer pass and hence the source and target tuples no
2940 : * longer match.
2941 : *
2942 : * In this case, we are now dealing with a NOT MATCHED case, and we
2943 : * process both WHEN NOT MATCHED BY SOURCE and WHEN NOT MATCHED [BY
2944 : * TARGET] actions. First ExecMergeMatched() processes the list of
2945 : * WHEN NOT MATCHED BY SOURCE actions in sequence until one passes,
2946 : * then ExecMergeNotMatched() processes any WHEN NOT MATCHED [BY
2947 : * TARGET] actions in sequence until one passes. Thus we may execute
2948 : * two actions; one of each kind.
2949 : *
2950 : * Thus we support concurrent updates that turn MATCHED candidate rows
2951 : * into NOT MATCHED rows. However, we do not attempt to support cases
2952 : * that would turn NOT MATCHED rows into MATCHED rows, or which would
2953 : * cause a target row to match a different source row.
2954 : *
2955 : * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED
2956 : * [BY TARGET].
2957 : *
2958 : * ExecMergeMatched() takes care of following the update chain and
2959 : * re-finding the qualifying WHEN MATCHED or WHEN NOT MATCHED BY SOURCE
2960 : * action, as long as the target tuple still exists. If the target tuple
2961 : * gets deleted or a concurrent update causes the join quals to fail, it
2962 : * returns a matched status of false and we call ExecMergeNotMatched().
2963 : * Given that ExecMergeMatched() always makes progress by following the
2964 : * update chain and we never switch from ExecMergeNotMatched() to
2965 : * ExecMergeMatched(), there is no risk of a livelock.
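 : *
 : * Purely illustrative sketch (hypothetical tables target/source):
 : *
 : *     MERGE INTO target t USING source s ON t.id = s.id
 : *     WHEN MATCHED THEN UPDATE SET val = s.val
 : *     WHEN NOT MATCHED THEN INSERT (id, val) VALUES (s.id, s.val);
 : *
 : * If another session deletes a joined target row before we act on it,
 : * that row is retried here as NOT MATCHED and the INSERT action (if
 : * any) applies instead.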
2966 : */
2967 14256 : matched = tupleid != NULL || oldtuple != NULL;
2968 14256 : if (matched)
2969 11632 : rslot = ExecMergeMatched(context, resultRelInfo, tupleid, oldtuple,
2970 : canSetTag, &matched);
2971 :
2972 : /*
2973 : * Deal with the NOT MATCHED case (either a NOT MATCHED tuple from the
2974 : * join, or a previously MATCHED tuple for which ExecMergeMatched() set
2975 : * "matched" to false, indicating that it no longer matches).
2976 : */
2977 14166 : if (!matched)
2978 : {
2979 : /*
2980 : * If a concurrent update turned a MATCHED case into a NOT MATCHED
2981 : * case, and we have both WHEN NOT MATCHED BY SOURCE and WHEN NOT
2982 : * MATCHED [BY TARGET] actions, and there is a RETURNING clause,
2983 : * ExecMergeMatched() may have already executed a WHEN NOT MATCHED BY
2984 : * SOURCE action, and computed the row to return. If so, we cannot
2985 : * execute a WHEN NOT MATCHED [BY TARGET] action now, so mark it as
2986 : * pending (to be processed on the next call to ExecModifyTable()).
2987 : * Otherwise, just process the action now.
2988 : */
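     : /*
     :  * Hypothetical illustration: with the example MERGE above plus a
     :  * RETURNING clause, rslot can already hold the RETURNING row of a WHEN
     :  * NOT MATCHED BY SOURCE action run by ExecMergeMatched() after a
     :  * concurrent update.  Only one RETURNING row can be returned per call,
     :  * so the WHEN NOT MATCHED [BY TARGET] action for the same source row
     :  * is deferred by stashing the plan slot, and ExecModifyTable() picks
     :  * it up at the start of its next call.
     :  */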
2989 2640 : if (rslot == NULL)
2990 2638 : rslot = ExecMergeNotMatched(context, resultRelInfo, canSetTag);
2991 : else
2992 2 : context->mtstate->mt_merge_pending_not_matched = context->planSlot;
2993 : }
2994 :
2995 14112 : return rslot;
2996 : }
2997 :
2998 : /*
2999 : * Check and execute the first qualifying MATCHED or NOT MATCHED BY SOURCE
3000 : * action, depending on whether the join quals are satisfied. If the target
3001 : * relation is a table, the current target tuple is identified by tupleid.
3002 : * Otherwise, if the target relation is a view, oldtuple is the current target
3003 : * tuple from the view.
3004 : *
3005 : * We start from the first WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action
3006 : * and check if the WHEN quals pass, if any. If the WHEN quals for the first
3007 : * action do not pass, we check the second, then the third and so on. If we
3008 : * reach the end without finding a qualifying action, we return NULL.
3009 : * Otherwise, we execute the qualifying action and return its RETURNING
3010 : * result, if any, or NULL.
3011 : *
3012 : * On entry, "*matched" is assumed to be true. If a concurrent update or
3013 : * delete is detected that causes the join quals to no longer pass, we set it
3014 : * to false, indicating that the caller should process any NOT MATCHED [BY
3015 : * TARGET] actions.
3016 : *
3017 : * After a concurrent update, we restart from the first action to look for a
3018 : * new qualifying action to execute. If the join quals originally passed, and
3019 : * the concurrent update caused them to no longer pass, then we switch from
3020 : * the MATCHED to the NOT MATCHED BY SOURCE list of actions before restarting
3021 : * (and setting "*matched" to false). As a result we may execute a WHEN NOT
3022 : * MATCHED BY SOURCE action, and set "*matched" to false, causing the caller
3023 : * to also execute a WHEN NOT MATCHED [BY TARGET] action.
3024 : */
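     : /*
     :  * As an illustration (hypothetical clauses), with
     :  *
     :  *   WHEN MATCHED AND src.qty = 0 THEN DELETE
     :  *   WHEN MATCHED THEN UPDATE SET qty = src.qty
     :  *
     :  * a matched row whose source qty is zero is deleted, because the first
     :  * WHEN clause whose condition passes wins and later clauses are not
     :  * considered for that row; a matched row for which every WHEN
     :  * condition fails is left untouched and NULL is returned.
     :  */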
3025 : static TupleTableSlot *
3026 11632 : ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3027 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag,
3028 : bool *matched)
3029 : {
3030 11632 : ModifyTableState *mtstate = context->mtstate;
3031 11632 : List **mergeActions = resultRelInfo->ri_MergeActions;
3032 : ItemPointerData lockedtid;
3033 : List *actionStates;
3034 11632 : TupleTableSlot *newslot = NULL;
3035 11632 : TupleTableSlot *rslot = NULL;
3036 11632 : EState *estate = context->estate;
3037 11632 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3038 : bool isNull;
3039 11632 : EPQState *epqstate = &mtstate->mt_epqstate;
3040 : ListCell *l;
3041 :
3042 : /* Expect matched to be true on entry */
3043 : Assert(*matched);
3044 :
3045 : /*
3046 : * If there are no WHEN MATCHED or WHEN NOT MATCHED BY SOURCE actions, we
3047 : * are done.
3048 : */
3049 11632 : if (mergeActions[MERGE_WHEN_MATCHED] == NIL &&
3050 1200 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] == NIL)
3051 528 : return NULL;
3052 :
3053 : /*
3054 : * Make the tuple and any needed join variables available to ExecQual and
3055 : * ExecProject. The target's existing tuple is installed in the scantuple.
3056 : * This target relation's slot is required only for MATCHED or NOT MATCHED
3057 : * BY SOURCE tuples and for UPDATE/DELETE actions.
3058 : */
3059 11104 : econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
3060 11104 : econtext->ecxt_innertuple = context->planSlot;
3061 11104 : econtext->ecxt_outertuple = NULL;
3062 :
3063 : /*
3064 : * This routine is only invoked for matched target rows, so we should
3065 : * either have the tupleid of the target row, or an old tuple from the
3066 : * target wholerow junk attr.
3067 : */
3068 : Assert(tupleid != NULL || oldtuple != NULL);
3069 11104 : ItemPointerSetInvalid(&lockedtid);
3070 11104 : if (oldtuple != NULL)
3071 : {
3072 : Assert(!resultRelInfo->ri_needLockTagTuple);
3073 96 : ExecForceStoreHeapTuple(oldtuple, resultRelInfo->ri_oldTupleSlot,
3074 : false);
3075 : }
3076 : else
3077 : {
3078 11008 : if (resultRelInfo->ri_needLockTagTuple)
3079 : {
3080 : /*
3081 : * This locks even for CMD_DELETE, for CMD_NOTHING, and for tuples
3082 : * that don't match mas_whenqual. MERGE on system catalogs is a
3083 : * minor use case, so don't bother optimizing those.
3084 : */
3085 7210 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3086 : InplaceUpdateTupleLock);
3087 7210 : lockedtid = *tupleid;
3088 : }
3089 11008 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
3090 : tupleid,
3091 : SnapshotAny,
3092 : resultRelInfo->ri_oldTupleSlot))
3093 0 : elog(ERROR, "failed to fetch the target tuple");
3094 : }
3095 :
3096 : /*
3097 : * Test the join condition. If it's satisfied, perform a MATCHED action.
3098 : * Otherwise, perform a NOT MATCHED BY SOURCE action.
3099 : *
3100 : * Note that this join condition will be NULL if there are no NOT MATCHED
3101 : * BY SOURCE actions --- see transform_MERGE_to_join(). In that case, we
3102 : * need only consider MATCHED actions here.
3103 : */
3104 11104 : if (ExecQual(resultRelInfo->ri_MergeJoinCondition, econtext))
3105 10922 : actionStates = mergeActions[MERGE_WHEN_MATCHED];
3106 : else
3107 182 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3108 :
3109 11104 : lmerge_matched:
3110 :
3111 19830 : foreach(l, actionStates)
3112 : {
3113 11232 : MergeActionState *relaction = (MergeActionState *) lfirst(l);
3114 11232 : CmdType commandType = relaction->mas_action->commandType;
3115 : TM_Result result;
3116 11232 : UpdateContext updateCxt = {0};
3117 :
3118 : /*
3119 : * Test condition, if any.
3120 : *
3121 : * In the absence of any condition, we perform the action
3122 : * unconditionally (no need to check separately since ExecQual() will
3123 : * return true if there are no conditions to evaluate).
3124 : */
3125 11232 : if (!ExecQual(relaction->mas_whenqual, econtext))
3126 8662 : continue;
3127 :
3128 : /*
3129 : * Check if the existing target tuple meets the USING checks of
3130 : * UPDATE/DELETE RLS policies. If those checks fail, we throw an
3131 : * error.
3132 : *
3133 : * The WITH CHECK quals for UPDATE RLS policies are applied in
3134 : * ExecUpdateAct() and hence we need not do anything special to handle
3135 : * them.
3136 : *
3137 : * NOTE: We must do this after WHEN quals are evaluated, so that we
3138 : * check policies only when they matter.
3139 : */
3140 2570 : if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
3141 : {
3142 90 : ExecWithCheckOptions(commandType == CMD_UPDATE ?
3143 : WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
3144 : resultRelInfo,
3145 : resultRelInfo->ri_oldTupleSlot,
3146 90 : context->mtstate->ps.state);
3147 : }
3148 :
3149 : /* Perform stated action */
3150 2546 : switch (commandType)
3151 : {
3152 2128 : case CMD_UPDATE:
3153 :
3154 : /*
3155 : * Project the output tuple, and use that to update the table.
3156 : * We don't need to filter out junk attributes, because the
3157 : * UPDATE action's targetlist doesn't have any.
3158 : */
3159 2128 : newslot = ExecProject(relaction->mas_proj);
3160 :
3161 2128 : mtstate->mt_merge_action = relaction;
3162 2128 : if (!ExecUpdatePrologue(context, resultRelInfo,
3163 : tupleid, NULL, newslot, &result))
3164 : {
3165 18 : if (result == TM_Ok)
3166 156 : goto out; /* "do nothing" */
3167 :
3168 12 : break; /* concurrent update/delete */
3169 : }
3170 :
3171 : /* INSTEAD OF ROW UPDATE Triggers */
3172 2110 : if (resultRelInfo->ri_TrigDesc &&
3173 334 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
3174 : {
3175 78 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
3176 : oldtuple, newslot))
3177 0 : goto out; /* "do nothing" */
3178 : }
3179 : else
3180 : {
3181 : /* checked ri_needLockTagTuple above */
3182 : Assert(oldtuple == NULL);
3183 :
3184 2032 : result = ExecUpdateAct(context, resultRelInfo, tupleid,
3185 : NULL, newslot, canSetTag,
3186 : &updateCxt);
3187 :
3188 : /*
3189 : * As in ExecUpdate(), if ExecUpdateAct() reports that a
3190 : * cross-partition update was done, then there's nothing
3191 : * else for us to do --- the UPDATE has been turned into a
3192 : * DELETE and an INSERT, and we must not perform any of
3193 : * the usual post-update tasks. Also, the RETURNING tuple
3194 : * (if any) has been projected, so we can just return
3195 : * that.
3196 : */
3197 2012 : if (updateCxt.crossPartUpdate)
3198 : {
3199 134 : mtstate->mt_merge_updated += 1;
3200 134 : rslot = context->cpUpdateReturningSlot;
3201 134 : goto out;
3202 : }
3203 : }
3204 :
3205 1956 : if (result == TM_Ok)
3206 : {
3207 1884 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
3208 : tupleid, NULL, newslot);
3209 1872 : mtstate->mt_merge_updated += 1;
3210 : }
3211 1944 : break;
3212 :
3213 388 : case CMD_DELETE:
3214 388 : mtstate->mt_merge_action = relaction;
3215 388 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
3216 : NULL, NULL, &result))
3217 : {
3218 12 : if (result == TM_Ok)
3219 6 : goto out; /* "do nothing" */
3220 :
3221 6 : break; /* concurrent update/delete */
3222 : }
3223 :
3224 : /* INSTEAD OF ROW DELETE Triggers */
3225 376 : if (resultRelInfo->ri_TrigDesc &&
3226 44 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
3227 : {
3228 6 : if (!ExecIRDeleteTriggers(estate, resultRelInfo,
3229 : oldtuple))
3230 0 : goto out; /* "do nothing" */
3231 : }
3232 : else
3233 : {
3234 : /* checked ri_needLockTagTuple above */
3235 : Assert(oldtuple == NULL);
3236 :
3237 370 : result = ExecDeleteAct(context, resultRelInfo, tupleid,
3238 : false);
3239 : }
3240 :
3241 376 : if (result == TM_Ok)
3242 : {
3243 358 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
3244 : false);
3245 358 : mtstate->mt_merge_deleted += 1;
3246 : }
3247 376 : break;
3248 :
3249 30 : case CMD_NOTHING:
3250 : /* Doing nothing is always OK */
3251 30 : result = TM_Ok;
3252 30 : break;
3253 :
3254 0 : default:
3255 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3256 : }
3257 :
3258 2368 : switch (result)
3259 : {
3260 2260 : case TM_Ok:
3261 : /* all good; perform final actions */
3262 2260 : if (canSetTag && commandType != CMD_NOTHING)
3263 2212 : (estate->es_processed)++;
3264 :
3265 2260 : break;
3266 :
3267 32 : case TM_SelfModified:
3268 :
3269 : /*
3270 : * The target tuple was already updated or deleted by the
3271 : * current command, or by a later command in the current
3272 : * transaction. The former case is explicitly disallowed by
3273 : * the SQL standard for MERGE, which insists that the MERGE
3274 : * join condition should not join a target row to more than
3275 : * one source row.
3276 : *
3277 : * The latter case arises if the tuple is modified by a
3278 : * command in a BEFORE trigger, or perhaps by a command in a
3279 : * volatile function used in the query. In such situations we
3280 : * should not ignore the MERGE action, but it is equally
3281 : * unsafe to proceed. We don't want to discard the original
3282 : * MERGE action while keeping the triggered actions based on
3283 : * it; and it would be no better to allow the original MERGE
3284 : * action while discarding the updates that it triggered. So
3285 : * throwing an error is the only safe course.
3286 : */
3287 32 : if (context->tmfd.cmax != estate->es_output_cid)
3288 12 : ereport(ERROR,
3289 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3290 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3291 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3292 :
3293 20 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3294 20 : ereport(ERROR,
3295 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3296 : /* translator: %s is a SQL command name */
3297 : errmsg("%s command cannot affect row a second time",
3298 : "MERGE"),
3299 : errhint("Ensure that not more than one source row matches any one target row.")));
3300 :
3301 : /* This shouldn't happen */
3302 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3303 : break;
3304 :
3305 10 : case TM_Deleted:
3306 10 : if (IsolationUsesXactSnapshot())
3307 0 : ereport(ERROR,
3308 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3309 : errmsg("could not serialize access due to concurrent delete")));
3310 :
3311 : /*
3312 : * If the tuple was already deleted, set matched to false to
3313 : * let caller handle it under NOT MATCHED [BY TARGET] clauses.
3314 : */
3315 10 : *matched = false;
3316 10 : goto out;
3317 :
3318 66 : case TM_Updated:
3319 : {
3320 : bool was_matched;
3321 : Relation resultRelationDesc;
3322 : TupleTableSlot *epqslot,
3323 : *inputslot;
3324 : LockTupleMode lockmode;
3325 :
3326 : /*
3327 : * The target tuple was concurrently updated by some other
3328 : * transaction. If we are currently processing a MATCHED
3329 : * action, use EvalPlanQual() with the new version of the
3330 : * tuple and recheck the join qual, to detect a change
3331 : * from the MATCHED to the NOT MATCHED cases. If we are
3332 : * already processing a NOT MATCHED BY SOURCE action, we
3333 : * skip this (cannot switch from NOT MATCHED BY SOURCE to
3334 : * MATCHED).
3335 : */
3336 66 : was_matched = relaction->mas_action->matchKind == MERGE_WHEN_MATCHED;
3337 66 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
3338 66 : lockmode = ExecUpdateLockMode(estate, resultRelInfo);
3339 :
3340 66 : if (was_matched)
3341 66 : inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
3342 : resultRelInfo->ri_RangeTableIndex);
3343 : else
3344 0 : inputslot = resultRelInfo->ri_oldTupleSlot;
3345 :
3346 66 : result = table_tuple_lock(resultRelationDesc, tupleid,
3347 : estate->es_snapshot,
3348 : inputslot, estate->es_output_cid,
3349 : lockmode, LockWaitBlock,
3350 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
3351 : &context->tmfd);
3352 66 : switch (result)
3353 : {
3354 64 : case TM_Ok:
3355 :
3356 : /*
3357 : * If the tuple was updated and migrated to
3358 : * another partition concurrently, the current
3359 : * MERGE implementation can't follow. There's
3360 : * probably a better way to handle this case, but
3361 : * it'd require recognizing the relation to which
3362 : * the tuple moved, and setting our current
3363 : * resultRelInfo to that.
3364 : */
3365 64 : if (ItemPointerIndicatesMovedPartitions(&context->tmfd.ctid))
3366 0 : ereport(ERROR,
3367 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3368 : errmsg("tuple to be merged was already moved to another partition due to concurrent update")));
3369 :
3370 : /*
3371 : * If this was a MATCHED case, use EvalPlanQual()
3372 : * to recheck the join condition.
3373 : */
3374 64 : if (was_matched)
3375 : {
3376 64 : epqslot = EvalPlanQual(epqstate,
3377 : resultRelationDesc,
3378 : resultRelInfo->ri_RangeTableIndex,
3379 : inputslot);
3380 :
3381 : /*
3382 : * If the subplan didn't return a tuple, then
3383 : * we must be dealing with an inner join for
3384 : * which the join condition no longer matches.
3385 : * This can only happen if there are no NOT
3386 : * MATCHED actions, and so there is nothing
3387 : * more to do.
3388 : */
3389 64 : if (TupIsNull(epqslot))
3390 0 : goto out;
3391 :
3392 : /*
3393 : * If we got a NULL ctid from the subplan, the
3394 : * join quals no longer pass and we switch to
3395 : * the NOT MATCHED BY SOURCE case.
3396 : */
3397 64 : (void) ExecGetJunkAttribute(epqslot,
3398 64 : resultRelInfo->ri_RowIdAttNo,
3399 : &isNull);
3400 64 : if (isNull)
3401 4 : *matched = false;
3402 :
3403 : /*
3404 : * Otherwise, recheck the join quals to see if
3405 : * we need to switch to the NOT MATCHED BY
3406 : * SOURCE case.
3407 : */
3408 64 : if (resultRelInfo->ri_needLockTagTuple)
3409 : {
3410 2 : if (ItemPointerIsValid(&lockedtid))
3411 2 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3412 : InplaceUpdateTupleLock);
3413 2 : LockTuple(resultRelInfo->ri_RelationDesc, &context->tmfd.ctid,
3414 : InplaceUpdateTupleLock);
3415 2 : lockedtid = context->tmfd.ctid;
3416 : }
3417 64 : if (!table_tuple_fetch_row_version(resultRelationDesc,
3418 : &context->tmfd.ctid,
3419 : SnapshotAny,
3420 : resultRelInfo->ri_oldTupleSlot))
3421 0 : elog(ERROR, "failed to fetch the target tuple");
3422 :
3423 64 : if (*matched)
3424 60 : *matched = ExecQual(resultRelInfo->ri_MergeJoinCondition,
3425 : econtext);
3426 :
3427 : /* Switch lists, if necessary */
3428 64 : if (!*matched)
3429 6 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3430 : }
3431 :
3432 : /*
3433 : * Loop back and process the MATCHED or NOT
3434 : * MATCHED BY SOURCE actions from the start.
3435 : */
3436 64 : goto lmerge_matched;
3437 :
3438 0 : case TM_Deleted:
3439 :
3440 : /*
3441 : * tuple already deleted; tell caller to run NOT
3442 : * MATCHED [BY TARGET] actions
3443 : */
3444 0 : *matched = false;
3445 0 : goto out;
3446 :
3447 2 : case TM_SelfModified:
3448 :
3449 : /*
3450 : * This can be reached when following an update
3451 : * chain from a tuple updated by another session,
3452 : * reaching a tuple that was already updated or
3453 : * deleted by the current command, or by a later
3454 : * command in the current transaction. As above,
3455 : * this should always be treated as an error.
3456 : */
3457 2 : if (context->tmfd.cmax != estate->es_output_cid)
3458 0 : ereport(ERROR,
3459 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3460 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3461 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3462 :
3463 2 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3464 2 : ereport(ERROR,
3465 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3466 : /* translator: %s is a SQL command name */
3467 : errmsg("%s command cannot affect row a second time",
3468 : "MERGE"),
3469 : errhint("Ensure that not more than one source row matches any one target row.")));
3470 :
3471 : /* This shouldn't happen */
3472 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3473 : goto out;
3474 :
3475 0 : default:
3476 : /* see table_tuple_lock call in ExecDelete() */
3477 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
3478 : result);
3479 : goto out;
3480 : }
3481 : }
3482 :
3483 0 : case TM_Invisible:
3484 : case TM_WouldBlock:
3485 : case TM_BeingModified:
3486 : /* these should not occur */
3487 0 : elog(ERROR, "unexpected tuple operation result: %d", result);
3488 : break;
3489 : }
3490 :
3491 : /* Process RETURNING if present */
3492 2260 : if (resultRelInfo->ri_projectReturning)
3493 : {
3494 270 : switch (commandType)
3495 : {
3496 162 : case CMD_UPDATE:
3497 162 : rslot = ExecProcessReturning(context,
3498 : resultRelInfo,
3499 : CMD_UPDATE,
3500 : resultRelInfo->ri_oldTupleSlot,
3501 : newslot,
3502 : context->planSlot);
3503 162 : break;
3504 :
3505 108 : case CMD_DELETE:
3506 108 : rslot = ExecProcessReturning(context,
3507 : resultRelInfo,
3508 : CMD_DELETE,
3509 : resultRelInfo->ri_oldTupleSlot,
3510 : NULL,
3511 : context->planSlot);
3512 108 : break;
3513 :
3514 0 : case CMD_NOTHING:
3515 0 : break;
3516 :
3517 0 : default:
3518 0 : elog(ERROR, "unrecognized commandType: %d",
3519 : (int) commandType);
3520 : }
3521 : }
3522 :
3523 : /*
3524 : * We've activated one of the WHEN clauses, so we don't search
3525 : * further. This is required behaviour, not an optimization.
3526 : */
3527 2260 : break;
3528 : }
3529 :
3530 : /*
3531 : * Successfully executed an action or no qualifying action was found.
3532 : */
3533 11014 : out:
3534 11014 : if (ItemPointerIsValid(&lockedtid))
3535 7210 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3536 : InplaceUpdateTupleLock);
3537 11014 : return rslot;
3538 : }
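     : /*
     :  * A hypothetical example of the TM_SelfModified case handled above:
     :  *
     :  *   MERGE INTO tgt USING (VALUES (1, 'a'), (1, 'b')) AS src(id, val)
     :  *     ON tgt.id = src.id
     :  *     WHEN MATCHED THEN UPDATE SET val = src.val;
     :  *
     :  * If tgt has a row with id = 1, the second source row finds that row
     :  * already updated by this same command, and the command fails with
     :  * "MERGE command cannot affect row a second time", per the SQL
     :  * standard's requirement that the join not match one target row to
     :  * more than one source row.
     :  */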
3539 :
3540 : /*
3541 : * Execute the first qualifying NOT MATCHED [BY TARGET] action.
3542 : */
3543 : static TupleTableSlot *
3544 2640 : ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3545 : bool canSetTag)
3546 : {
3547 2640 : ModifyTableState *mtstate = context->mtstate;
3548 2640 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3549 : List *actionStates;
3550 2640 : TupleTableSlot *rslot = NULL;
3551 : ListCell *l;
3552 :
3553 : /*
3554 : * For INSERT actions, the root relation's merge action is OK since the
3555 : * INSERT's targetlist and the WHEN conditions can only refer to the
3556 : * source relation and hence it does not matter which result relation we
3557 : * work with.
3558 : *
3559 : * XXX does this mean that we can avoid creating copies of actionStates on
3560 : * partitioned tables, for not-matched actions?
3561 : */
3562 2640 : actionStates = resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET];
3563 :
3564 : /*
3565 : * Make source tuple available to ExecQual and ExecProject. We don't need
3566 : * the target tuple, since the WHEN quals and targetlist can't refer to
3567 : * the target columns.
3568 : */
3569 2640 : econtext->ecxt_scantuple = NULL;
3570 2640 : econtext->ecxt_innertuple = context->planSlot;
3571 2640 : econtext->ecxt_outertuple = NULL;
3572 :
3573 3510 : foreach(l, actionStates)
3574 : {
3575 2640 : MergeActionState *action = (MergeActionState *) lfirst(l);
3576 2640 : CmdType commandType = action->mas_action->commandType;
3577 : TupleTableSlot *newslot;
3578 :
3579 : /*
3580 : * Test condition, if any.
3581 : *
3582 : * In the absence of any condition, we perform the action
3583 : * unconditionally (no need to check separately since ExecQual() will
3584 : * return true if there are no conditions to evaluate).
3585 : */
3586 2640 : if (!ExecQual(action->mas_whenqual, econtext))
3587 870 : continue;
3588 :
3589 : /* Perform stated action */
3590 1770 : switch (commandType)
3591 : {
3592 1770 : case CMD_INSERT:
3593 :
3594 : /*
3595 : * Project the tuple. In case of a partitioned table, the
3596 : * projection was already built to use the root's descriptor,
3597 : * so we don't need to map the tuple here.
3598 : */
3599 1770 : newslot = ExecProject(action->mas_proj);
3600 1770 : mtstate->mt_merge_action = action;
3601 :
3602 1770 : rslot = ExecInsert(context, mtstate->rootResultRelInfo,
3603 : newslot, canSetTag, NULL, NULL);
3604 1716 : mtstate->mt_merge_inserted += 1;
3605 1716 : break;
3606 0 : case CMD_NOTHING:
3607 : /* Do nothing */
3608 0 : break;
3609 0 : default:
3610 0 : elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
3611 : }
3612 :
3613 : /*
3614 : * We've activated one of the WHEN clauses, so we don't search
3615 : * further. This is required behaviour, not an optimization.
3616 : */
3617 1716 : break;
3618 : }
3619 :
3620 2586 : return rslot;
3621 : }
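     : /*
     :  * For illustration (hypothetical clause): given
     :  *
     :  *   WHEN NOT MATCHED AND src.qty > 0 THEN
     :  *     INSERT (id, qty) VALUES (src.id, src.qty)
     :  *
     :  * a source row with qty = 0 satisfies no clause, so nothing is
     :  * inserted and NULL is returned for it.  When something is inserted,
     :  * it always goes through mtstate->rootResultRelInfo, so for a
     :  * partitioned target ExecInsert() routes the new tuple from the root
     :  * to the appropriate leaf partition.
     :  */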
3622 :
3623 : /*
3624 : * Initialize state for execution of MERGE.
3625 : */
3626 : void
3627 1432 : ExecInitMerge(ModifyTableState *mtstate, EState *estate)
3628 : {
3629 1432 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
3630 1432 : ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
3631 : ResultRelInfo *resultRelInfo;
3632 : ExprContext *econtext;
3633 : ListCell *lc;
3634 : int i;
3635 :
3636 1432 : if (node->mergeActionLists == NIL)
3637 0 : return;
3638 :
3639 1432 : mtstate->mt_merge_subcommands = 0;
3640 :
3641 1432 : if (mtstate->ps.ps_ExprContext == NULL)
3642 1254 : ExecAssignExprContext(estate, &mtstate->ps);
3643 1432 : econtext = mtstate->ps.ps_ExprContext;
3644 :
3645 : /*
3646 : * Create a MergeActionState for each action on the mergeActionList and
3647 : * add it to either a list of matched actions or not-matched actions.
3648 : *
3649 : * Similar logic appears in ExecInitPartitionInfo(), so if changing
3650 : * anything here, do so there too.
3651 : */
3652 1432 : i = 0;
3653 3096 : foreach(lc, node->mergeActionLists)
3654 : {
3655 1664 : List *mergeActionList = lfirst(lc);
3656 : Node *joinCondition;
3657 : TupleDesc relationDesc;
3658 : ListCell *l;
3659 :
3660 1664 : joinCondition = (Node *) list_nth(node->mergeJoinConditions, i);
3661 1664 : resultRelInfo = mtstate->resultRelInfo + i;
3662 1664 : i++;
3663 1664 : relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3664 :
3665 : /* initialize slots for MERGE fetches from this rel */
3666 1664 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3667 1664 : ExecInitMergeTupleSlots(mtstate, resultRelInfo);
3668 :
3669 : /* initialize state for join condition checking */
3670 1664 : resultRelInfo->ri_MergeJoinCondition =
3671 1664 : ExecInitQual((List *) joinCondition, &mtstate->ps);
3672 :
3673 4640 : foreach(l, mergeActionList)
3674 : {
3675 2976 : MergeAction *action = (MergeAction *) lfirst(l);
3676 : MergeActionState *action_state;
3677 : TupleTableSlot *tgtslot;
3678 : TupleDesc tgtdesc;
3679 :
3680 : /*
3681 : * Build action merge state for this rel. (For partitions,
3682 : * equivalent code exists in ExecInitPartitionInfo.)
3683 : */
3684 2976 : action_state = makeNode(MergeActionState);
3685 2976 : action_state->mas_action = action;
3686 2976 : action_state->mas_whenqual = ExecInitQual((List *) action->qual,
3687 : &mtstate->ps);
3688 :
3689 : /*
3690 : * We create three lists - one for each MergeMatchKind - and stick
3691 : * the MergeActionState into the appropriate list.
3692 : */
3693 5952 : resultRelInfo->ri_MergeActions[action->matchKind] =
3694 2976 : lappend(resultRelInfo->ri_MergeActions[action->matchKind],
3695 : action_state);
3696 :
3697 2976 : switch (action->commandType)
3698 : {
3699 980 : case CMD_INSERT:
3700 980 : ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
3701 : action->targetList);
3702 :
3703 : /*
3704 : * If the MERGE targets a partitioned table, any INSERT
3705 : * actions must be routed through it, not the child
3706 : * relations. Initialize the routing struct and the root
3707 : * table's "new" tuple slot for that, if not already done.
3708 : * The projection we prepare, for all relations, uses the
3709 : * root relation descriptor, and targets the plan's root
3710 : * slot. (This is consistent with the fact that we
3711 : * checked the plan output to match the root relation,
3712 : * above.)
3713 : */
3714 980 : if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
3715 : RELKIND_PARTITIONED_TABLE)
3716 : {
3717 298 : if (mtstate->mt_partition_tuple_routing == NULL)
3718 : {
3719 : /*
3720 : * Initialize planstate for routing if not already
3721 : * done.
3722 : *
3723 : * Note that the slot is managed as a standalone
3724 : * slot belonging to ModifyTableState, so we pass
3725 : * NULL for the 2nd argument.
3726 : */
3727 124 : mtstate->mt_root_tuple_slot =
3728 124 : table_slot_create(rootRelInfo->ri_RelationDesc,
3729 : NULL);
3730 124 : mtstate->mt_partition_tuple_routing =
3731 124 : ExecSetupPartitionTupleRouting(estate,
3732 : rootRelInfo->ri_RelationDesc);
3733 : }
3734 298 : tgtslot = mtstate->mt_root_tuple_slot;
3735 298 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
3736 : }
3737 : else
3738 : {
3739 : /* not partitioned? use the stock relation and slot */
3740 682 : tgtslot = resultRelInfo->ri_newTupleSlot;
3741 682 : tgtdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3742 : }
3743 :
3744 980 : action_state->mas_proj =
3745 980 : ExecBuildProjectionInfo(action->targetList, econtext,
3746 : tgtslot,
3747 : &mtstate->ps,
3748 : tgtdesc);
3749 :
3750 980 : mtstate->mt_merge_subcommands |= MERGE_INSERT;
3751 980 : break;
3752 1522 : case CMD_UPDATE:
3753 1522 : action_state->mas_proj =
3754 1522 : ExecBuildUpdateProjection(action->targetList,
3755 : true,
3756 : action->updateColnos,
3757 : relationDesc,
3758 : econtext,
3759 : resultRelInfo->ri_newTupleSlot,
3760 : &mtstate->ps);
3761 1522 : mtstate->mt_merge_subcommands |= MERGE_UPDATE;
3762 1522 : break;
3763 416 : case CMD_DELETE:
3764 416 : mtstate->mt_merge_subcommands |= MERGE_DELETE;
3765 416 : break;
3766 58 : case CMD_NOTHING:
3767 58 : break;
3768 0 : default:
3769 0 : elog(ERROR, "unknown operation");
3770 : break;
3771 : }
3772 : }
3773 : }
3774 : }
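     : /*
     :  * For illustration (hypothetical statement): a MERGE such as
     :  *
     :  *   MERGE INTO tgt USING src ON tgt.id = src.id
     :  *     WHEN MATCHED THEN UPDATE SET val = src.val
     :  *     WHEN NOT MATCHED BY SOURCE THEN DELETE
     :  *     WHEN NOT MATCHED THEN INSERT (id, val) VALUES (src.id, src.val);
     :  *
     :  * yields one MergeActionState in each of
     :  * ri_MergeActions[MERGE_WHEN_MATCHED],
     :  * [MERGE_WHEN_NOT_MATCHED_BY_SOURCE] and
     :  * [MERGE_WHEN_NOT_MATCHED_BY_TARGET].  If tgt is partitioned, the
     :  * INSERT action's projection targets the root relation's slot so that
     :  * the resulting tuple can later be routed to a leaf partition.
     :  */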
3775 :
3776 : /*
3777 : * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
3778 : *
3779 : * We mark 'projectNewInfoValid' even though the projections themselves
3780 : * are not initialized here.
3781 : */
3782 : void
3783 1682 : ExecInitMergeTupleSlots(ModifyTableState *mtstate,
3784 : ResultRelInfo *resultRelInfo)
3785 : {
3786 1682 : EState *estate = mtstate->ps.state;
3787 :
3788 : Assert(!resultRelInfo->ri_projectNewInfoValid);
3789 :
3790 1682 : resultRelInfo->ri_oldTupleSlot =
3791 1682 : table_slot_create(resultRelInfo->ri_RelationDesc,
3792 : &estate->es_tupleTable);
3793 1682 : resultRelInfo->ri_newTupleSlot =
3794 1682 : table_slot_create(resultRelInfo->ri_RelationDesc,
3795 : &estate->es_tupleTable);
3796 1682 : resultRelInfo->ri_projectNewInfoValid = true;
3797 1682 : }
3798 :
3799 : /*
3800 : * Process BEFORE EACH STATEMENT triggers
3801 : */
3802 : static void
3803 118632 : fireBSTriggers(ModifyTableState *node)
3804 : {
3805 118632 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
3806 118632 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3807 :
3808 118632 : switch (node->operation)
3809 : {
3810 92230 : case CMD_INSERT:
3811 92230 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3812 92218 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3813 834 : ExecBSUpdateTriggers(node->ps.state,
3814 : resultRelInfo);
3815 92218 : break;
3816 12932 : case CMD_UPDATE:
3817 12932 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3818 12932 : break;
3819 12158 : case CMD_DELETE:
3820 12158 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3821 12158 : break;
3822 1312 : case CMD_MERGE:
3823 1312 : if (node->mt_merge_subcommands & MERGE_INSERT)
3824 722 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3825 1312 : if (node->mt_merge_subcommands & MERGE_UPDATE)
3826 930 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
3827 1312 : if (node->mt_merge_subcommands & MERGE_DELETE)
3828 344 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
3829 1312 : break;
3830 0 : default:
3831 0 : elog(ERROR, "unknown operation");
3832 : break;
3833 : }
3834 118620 : }
3835 :
3836 : /*
3837 : * Process AFTER EACH STATEMENT triggers
3838 : */
3839 : static void
3840 115458 : fireASTriggers(ModifyTableState *node)
3841 : {
3842 115458 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
3843 115458 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3844 :
3845 115458 : switch (node->operation)
3846 : {
3847 90024 : case CMD_INSERT:
3848 90024 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3849 732 : ExecASUpdateTriggers(node->ps.state,
3850 : resultRelInfo,
3851 732 : node->mt_oc_transition_capture);
3852 90024 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
3853 90024 : node->mt_transition_capture);
3854 90024 : break;
3855 12232 : case CMD_UPDATE:
3856 12232 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3857 12232 : node->mt_transition_capture);
3858 12232 : break;
3859 12034 : case CMD_DELETE:
3860 12034 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
3861 12034 : node->mt_transition_capture);
3862 12034 : break;
3863 1168 : case CMD_MERGE:
3864 1168 : if (node->mt_merge_subcommands & MERGE_DELETE)
3865 308 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
3866 308 : node->mt_transition_capture);
3867 1168 : if (node->mt_merge_subcommands & MERGE_UPDATE)
3868 834 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
3869 834 : node->mt_transition_capture);
3870 1168 : if (node->mt_merge_subcommands & MERGE_INSERT)
3871 660 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
3872 660 : node->mt_transition_capture);
3873 1168 : break;
3874 0 : default:
3875 0 : elog(ERROR, "unknown operation");
3876 : break;
3877 : }
3878 115458 : }
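     : /*
     :  * Illustrative note on the two functions above: statement-level
     :  * triggers are fired according to the command and its sub-commands,
     :  * regardless of how many rows each one ends up touching.  For example
     :  * (hypothetically), INSERT ... ON CONFLICT DO UPDATE fires both INSERT
     :  * and UPDATE statement triggers, and a MERGE containing only UPDATE
     :  * and DELETE actions fires UPDATE and DELETE statement triggers but no
     :  * INSERT ones.
     :  */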
3879 :
3880 : /*
3881 : * Set up the state needed for collecting transition tuples for AFTER
3882 : * triggers.
3883 : */
3884 : static void
3885 118962 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
3886 : {
3887 118962 : ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
3888 118962 : ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
3889 :
3890 : /* Check for transition tables on the directly targeted relation. */
3891 118962 : mtstate->mt_transition_capture =
3892 118962 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3893 118962 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3894 : mtstate->operation);
3895 118962 : if (plan->operation == CMD_INSERT &&
3896 92232 : plan->onConflictAction == ONCONFLICT_UPDATE)
3897 834 : mtstate->mt_oc_transition_capture =
3898 834 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
3899 834 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
3900 : CMD_UPDATE);
3901 118962 : }
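     : /*
     :  * For illustration (hypothetical trigger), transition tables come from
     :  * REFERENCING clauses such as
     :  *
     :  *   CREATE TRIGGER tgt_ins AFTER INSERT ON tgt
     :  *     REFERENCING NEW TABLE AS new_rows
     :  *     FOR EACH STATEMENT EXECUTE FUNCTION log_inserts();
     :  *
     :  * For INSERT ... ON CONFLICT DO UPDATE, inserted and updated rows feed
     :  * different transition tables, which is why a separate capture state
     :  * (mt_oc_transition_capture) is set up above for the UPDATE part.
     :  */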
3902 :
3903 : /*
3904 : * ExecPrepareTupleRouting --- prepare for routing one tuple
3905 : *
3906 : * Determine the partition in which the tuple in slot is to be inserted,
3907 : * and return its ResultRelInfo in *partRelInfo. The return value is
3908 : * a slot holding the tuple of the partition rowtype.
3909 : *
3910 : * This also sets the transition table information in mtstate based on the
3911 : * selected partition.
3912 : */
3913 : static TupleTableSlot *
3914 722210 : ExecPrepareTupleRouting(ModifyTableState *mtstate,
3915 : EState *estate,
3916 : PartitionTupleRouting *proute,
3917 : ResultRelInfo *targetRelInfo,
3918 : TupleTableSlot *slot,
3919 : ResultRelInfo **partRelInfo)
3920 : {
3921 : ResultRelInfo *partrel;
3922 : TupleConversionMap *map;
3923 :
3924 : /*
3925 : * Look up the target partition's ResultRelInfo. If ExecFindPartition does
3926 : * not find a valid partition for the tuple in 'slot' then an error is
3927 : * raised. An error may also be raised if the found partition is not a
3928 : * valid target for INSERTs. This is required since a partitioned table
3929 : * UPDATE to another partition becomes a DELETE+INSERT.
3930 : */
3931 722210 : partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
3932 :
3933 : /*
3934 : * If we're capturing transition tuples, we might need to convert from the
3935 : * partition rowtype to root partitioned table's rowtype. But if there
3936 : * are no BEFORE triggers on the partition that could change the tuple, we
3937 : * can just remember the original unconverted tuple to avoid a needless
3938 : * round trip conversion.
3939 : */
3940 722006 : if (mtstate->mt_transition_capture != NULL)
3941 : {
3942 : bool has_before_insert_row_trig;
3943 :
3944 168 : has_before_insert_row_trig = (partrel->ri_TrigDesc &&
3945 42 : partrel->ri_TrigDesc->trig_insert_before_row);
3946 :
3947 126 : mtstate->mt_transition_capture->tcs_original_insert_tuple =
3948 126 : !has_before_insert_row_trig ? slot : NULL;
3949 : }
3950 :
3951 : /*
3952 : * Convert the tuple, if necessary.
3953 : */
3954 722006 : map = ExecGetRootToChildMap(partrel, estate);
3955 722006 : if (map != NULL)
3956 : {
3957 68436 : TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
3958 :
3959 68436 : slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
3960 : }
3961 :
3962 722006 : *partRelInfo = partrel;
3963 722006 : return slot;
3964 : }
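     : /*
     :  * Illustrative note: the attribute map used above handles leaf
     :  * partitions whose physical column layout differs from the root's,
     :  * for example (hypothetically) a table that gained its columns in a
     :  * different order before being added with ALTER TABLE ... ATTACH
     :  * PARTITION.  Transition tuples must be captured in the root's row
     :  * type, which is why an unconverted tuple is reused only when no
     :  * BEFORE ROW INSERT trigger on the partition could have changed it.
     :  */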
3965 :
3966 : /* ----------------------------------------------------------------
3967 : * ExecModifyTable
3968 : *
3969 : * Perform table modifications as required, and return RETURNING results
3970 : * if needed.
3971 : * ----------------------------------------------------------------
3972 : */
3973 : static TupleTableSlot *
3974 127172 : ExecModifyTable(PlanState *pstate)
3975 : {
3976 127172 : ModifyTableState *node = castNode(ModifyTableState, pstate);
3977 : ModifyTableContext context;
3978 127172 : EState *estate = node->ps.state;
3979 127172 : CmdType operation = node->operation;
3980 : ResultRelInfo *resultRelInfo;
3981 : PlanState *subplanstate;
3982 : TupleTableSlot *slot;
3983 : TupleTableSlot *oldSlot;
3984 : ItemPointerData tuple_ctid;
3985 : HeapTupleData oldtupdata;
3986 : HeapTuple oldtuple;
3987 : ItemPointer tupleid;
3988 : bool tuplock;
3989 :
3990 127172 : CHECK_FOR_INTERRUPTS();
3991 :
3992 : /*
3993 : * This should NOT get called during EvalPlanQual; we should have passed a
3994 : * subplan tree to EvalPlanQual, instead. Use a runtime test not just
3995 : * Assert because this condition is easy to miss in testing. (Note:
3996 : * although ModifyTable should not get executed within an EvalPlanQual
3997 : * operation, we do have to allow it to be initialized and shut down in
3998 : * case it is within a CTE subplan. Hence this test must be here, not in
3999 : * ExecInitModifyTable.)
4000 : */
4001 127172 : if (estate->es_epq_active != NULL)
4002 0 : elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
4003 :
4004 : /*
4005 : * If we've already completed processing, don't try to do more. We need
4006 : * this test because ExecPostprocessPlan might call us an extra time, and
4007 : * our subplan's nodes aren't necessarily robust against being called
4008 : * extra times.
4009 : */
4010 127172 : if (node->mt_done)
4011 788 : return NULL;
4012 :
4013 : /*
4014 : * On first call, fire BEFORE STATEMENT triggers before proceeding.
4015 : */
4016 126384 : if (node->fireBSTriggers)
4017 : {
4018 118632 : fireBSTriggers(node);
4019 118620 : node->fireBSTriggers = false;
4020 : }
4021 :
4022 : /* Preload local variables */
4023 126372 : resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
4024 126372 : subplanstate = outerPlanState(node);
4025 :
4026 : /* Set global context */
4027 126372 : context.mtstate = node;
4028 126372 : context.epqstate = &node->mt_epqstate;
4029 126372 : context.estate = estate;
4030 :
4031 : /*
4032 : * Fetch rows from subplan, and execute the required table modification
4033 : * for each row.
4034 : */
4035 : for (;;)
4036 : {
4037 : /*
4038 : * Reset the per-output-tuple exprcontext. This is needed because
4039 : * triggers expect to use that context as workspace. It's a bit ugly
4040 : * to do this below the top level of the plan, however. We might need
4041 : * to rethink this later.
4042 : */
4043 13499210 : ResetPerTupleExprContext(estate);
4044 :
4045 : /*
4046 : * Reset per-tuple memory context used for processing on conflict and
4047 : * returning clauses, to free any expression evaluation storage
4048 : * allocated in the previous cycle.
4049 : */
4050 13499210 : if (pstate->ps_ExprContext)
4051 342578 : ResetExprContext(pstate->ps_ExprContext);
4052 :
4053 : /*
4054 : * If there is a pending MERGE ... WHEN NOT MATCHED [BY TARGET] action
4055 : * to execute, do so now --- see the comments in ExecMerge().
4056 : */
4057 13499210 : if (node->mt_merge_pending_not_matched != NULL)
4058 : {
4059 2 : context.planSlot = node->mt_merge_pending_not_matched;
4060 2 : context.cpDeletedSlot = NULL;
4061 :
4062 2 : slot = ExecMergeNotMatched(&context, node->resultRelInfo,
4063 2 : node->canSetTag);
4064 :
4065 : /* Clear the pending action */
4066 2 : node->mt_merge_pending_not_matched = NULL;
4067 :
4068 : /*
4069 : * If we got a RETURNING result, return it to the caller. We'll
4070 : * continue the work on next call.
4071 : */
4072 2 : if (slot)
4073 2 : return slot;
4074 :
4075 0 : continue; /* continue with the next tuple */
4076 : }
4077 :
4078 : /* Fetch the next row from subplan */
4079 13499208 : context.planSlot = ExecProcNode(subplanstate);
4080 13498802 : context.cpDeletedSlot = NULL;
4081 :
4082 : /* No more tuples to process? */
4083 13498802 : if (TupIsNull(context.planSlot))
4084 : break;
4085 :
4086 : /*
4087 : * When there are multiple result relations, each tuple contains a
4088 : * junk column that gives the OID of the rel from which it came.
4089 : * Extract it and select the correct result relation.
4090 : */
4091 13383344 : if (AttributeNumberIsValid(node->mt_resultOidAttno))
4092 : {
4093 : Datum datum;
4094 : bool isNull;
4095 : Oid resultoid;
4096 :
4097 5064 : datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
4098 : &isNull);
4099 5064 : if (isNull)
4100 : {
4101 : /*
4102 : * For commands other than MERGE, any tuples having InvalidOid
4103 : * for tableoid are errors. For MERGE, we may need to handle
4104 : * them as WHEN NOT MATCHED clauses if any, so do that.
4105 : *
4106 : * Note that we use the node's toplevel resultRelInfo, not any
4107 : * specific partition's.
4108 : */
4109 466 : if (operation == CMD_MERGE)
4110 : {
4111 466 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4112 :
4113 466 : slot = ExecMerge(&context, node->resultRelInfo,
4114 466 : NULL, NULL, node->canSetTag);
4115 :
4116 : /*
4117 : * If we got a RETURNING result, return it to the caller.
4118 : * We'll continue the work on next call.
4119 : */
4120 460 : if (slot)
4121 20 : return slot;
4122 :
4123 440 : continue; /* continue with the next tuple */
4124 : }
4125 :
4126 0 : elog(ERROR, "tableoid is NULL");
4127 : }
4128 4598 : resultoid = DatumGetObjectId(datum);
4129 :
4130 : /* If it's not the same as last time, we need to locate the rel */
4131 4598 : if (resultoid != node->mt_lastResultOid)
4132 3142 : resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
4133 : false, true);
4134 : }
4135 :
4136 : /*
4137 : * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
4138 : * here is compute the RETURNING expressions.
4139 : */
4140 13382878 : if (resultRelInfo->ri_usesFdwDirectModify)
4141 : {
4142 : Assert(resultRelInfo->ri_projectReturning);
4143 :
4144 : /*
4145 : * A scan slot containing the data that was actually inserted,
4146 : * updated or deleted has already been made available to
4147 : * ExecProcessReturning by IterateDirectModify, so no need to
4148 : * provide it here. The individual old and new slots are not
4149 : * needed, since direct-modify is disabled if the RETURNING list
4150 : * refers to OLD/NEW values.
4151 : */
4152 : Assert((resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD) == 0 &&
4153 : (resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW) == 0);
4154 :
4155 694 : slot = ExecProcessReturning(&context, resultRelInfo, operation,
4156 : NULL, NULL, context.planSlot);
4157 :
4158 694 : return slot;
4159 : }
4160 :
4161 13382184 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4162 13382184 : slot = context.planSlot;
4163 :
4164 13382184 : tupleid = NULL;
4165 13382184 : oldtuple = NULL;
4166 :
4167 : /*
4168 : * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
4169 : * to be updated/deleted/merged. For a heap relation, that's a TID;
4170 : * otherwise we may have a wholerow junk attr that carries the old
4171 : * tuple in toto. Keep this in step with the part of
4172 : * ExecInitModifyTable that sets up ri_RowIdAttNo.
4173 : */
4174 13382184 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4175 : operation == CMD_MERGE)
4176 : {
4177 : char relkind;
4178 : Datum datum;
4179 : bool isNull;
4180 :
4181 1961442 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4182 1961442 : if (relkind == RELKIND_RELATION ||
4183 562 : relkind == RELKIND_MATVIEW ||
4184 : relkind == RELKIND_PARTITIONED_TABLE)
4185 : {
4186 : /* ri_RowIdAttNo refers to a ctid attribute */
4187 : Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo));
4188 1960886 : datum = ExecGetJunkAttribute(slot,
4189 1960886 : resultRelInfo->ri_RowIdAttNo,
4190 : &isNull);
4191 :
4192 : /*
4193 : * For commands other than MERGE, any tuples having a null row
4194 : * identifier are errors. For MERGE, we may need to handle
4195 : * them as WHEN NOT MATCHED clauses if any, so do that.
4196 : *
4197 : * Note that we use the node's toplevel resultRelInfo, not any
4198 : * specific partition's.
4199 : */
4200 1960886 : if (isNull)
4201 : {
4202 2110 : if (operation == CMD_MERGE)
4203 : {
4204 2110 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4205 :
4206 2110 : slot = ExecMerge(&context, node->resultRelInfo,
4207 2110 : NULL, NULL, node->canSetTag);
4208 :
4209 : /*
4210 : * If we got a RETURNING result, return it to the
4211 : * caller. We'll continue the work on next call.
4212 : */
4213 2068 : if (slot)
4214 108 : return slot;
4215 :
4216 2002 : continue; /* continue with the next tuple */
4217 : }
4218 :
4219 0 : elog(ERROR, "ctid is NULL");
4220 : }
4221 :
4222 1958776 : tupleid = (ItemPointer) DatumGetPointer(datum);
4223 1958776 : tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
4224 1958776 : tupleid = &tuple_ctid;
4225 : }
4226 :
4227 : /*
4228 : * Use the wholerow attribute, when available, to reconstruct the
4229 : * old relation tuple. The old tuple serves one or both of two
4230 : * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
4231 : * provides values for any unchanged columns for the NEW tuple of
4232 : * an UPDATE, because the subplan does not produce all the columns
4233 : * of the target table.
4234 : *
4235 : * Note that the wholerow attribute does not carry system columns,
4236 : * so foreign table triggers miss seeing those, except that we
4237 : * know enough here to set t_tableOid. Quite separately from
4238 : * this, the FDW may fetch its own junk attrs to identify the row.
4239 : *
4240 : * Other relevant relkinds, currently limited to views, always
4241 : * have a wholerow attribute.
4242 : */
4243 556 : else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4244 : {
4245 530 : datum = ExecGetJunkAttribute(slot,
4246 530 : resultRelInfo->ri_RowIdAttNo,
4247 : &isNull);
4248 :
4249 : /*
4250 : * For commands other than MERGE, any tuples having a null row
4251 : * identifier are errors. For MERGE, we may need to handle
4252 : * them as WHEN NOT MATCHED clauses if any, so do that.
4253 : *
4254 : * Note that we use the node's toplevel resultRelInfo, not any
4255 : * specific partition's.
4256 : */
4257 530 : if (isNull)
4258 : {
4259 48 : if (operation == CMD_MERGE)
4260 : {
4261 48 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4262 :
4263 48 : slot = ExecMerge(&context, node->resultRelInfo,
4264 48 : NULL, NULL, node->canSetTag);
4265 :
4266 : /*
4267 : * If we got a RETURNING result, return it to the
4268 : * caller. We'll continue the work on next call.
4269 : */
4270 42 : if (slot)
4271 12 : return slot;
4272 :
4273 30 : continue; /* continue with the next tuple */
4274 : }
4275 :
4276 0 : elog(ERROR, "wholerow is NULL");
4277 : }
4278 :
4279 482 : oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
4280 482 : oldtupdata.t_len =
4281 482 : HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
4282 482 : ItemPointerSetInvalid(&(oldtupdata.t_self));
4283 : /* Historically, view triggers see invalid t_tableOid. */
4284 482 : oldtupdata.t_tableOid =
4285 482 : (relkind == RELKIND_VIEW) ? InvalidOid :
4286 206 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
4287 :
4288 482 : oldtuple = &oldtupdata;
4289 : }
4290 : else
4291 : {
4292 : /* Only foreign tables are allowed to omit a row-ID attr */
4293 : Assert(relkind == RELKIND_FOREIGN_TABLE);
4294 : }
4295 : }
4296 :
4297 13380026 : switch (operation)
4298 : {
4299 11420742 : case CMD_INSERT:
4300 : /* Initialize projection info if first time for this table */
4301 11420742 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4302 91096 : ExecInitInsertProjection(node, resultRelInfo);
4303 11420742 : slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
4304 11420742 : slot = ExecInsert(&context, resultRelInfo, slot,
4305 11420742 : node->canSetTag, NULL, NULL);
4306 11418728 : break;
4307 :
4308 309114 : case CMD_UPDATE:
4309 309114 : tuplock = false;
4310 :
4311 : /* Initialize projection info if first time for this table */
4312 309114 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4313 12660 : ExecInitUpdateProjection(node, resultRelInfo);
4314 :
4315 : /*
4316 : * Make the new tuple by combining plan's output tuple with
4317 : * the old tuple being updated.
4318 : */
4319 309114 : oldSlot = resultRelInfo->ri_oldTupleSlot;
4320 309114 : if (oldtuple != NULL)
4321 : {
4322 : Assert(!resultRelInfo->ri_needLockTagTuple);
4323 : /* Use the wholerow junk attr as the old tuple. */
4324 314 : ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
4325 : }
4326 : else
4327 : {
4328 : /* Fetch the most recent version of old tuple. */
4329 308800 : Relation relation = resultRelInfo->ri_RelationDesc;
4330 :
4331 308800 : if (resultRelInfo->ri_needLockTagTuple)
4332 : {
4333 16942 : LockTuple(relation, tupleid, InplaceUpdateTupleLock);
4334 16942 : tuplock = true;
4335 : }
4336 308800 : if (!table_tuple_fetch_row_version(relation, tupleid,
4337 : SnapshotAny,
4338 : oldSlot))
4339 0 : elog(ERROR, "failed to fetch tuple being updated");
4340 : }
4341 309114 : slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
4342 : oldSlot);
4343 :
4344 : /* Now apply the update. */
4345 309114 : slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
4346 309114 : oldSlot, slot, node->canSetTag);
4347 308610 : if (tuplock)
4348 16942 : UnlockTuple(resultRelInfo->ri_RelationDesc, tupleid,
4349 : InplaceUpdateTupleLock);
4350 308610 : break;
4351 :
4352 1638538 : case CMD_DELETE:
4353 1638538 : slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
4354 1638538 : true, false, node->canSetTag, NULL, NULL, NULL);
4355 1638456 : break;
4356 :
4357 11632 : case CMD_MERGE:
4358 11632 : slot = ExecMerge(&context, resultRelInfo, tupleid, oldtuple,
4359 11632 : node->canSetTag);
4360 11542 : break;
4361 :
4362 0 : default:
4363 0 : elog(ERROR, "unknown operation");
4364 : break;
4365 : }
4366 :
4367 : /*
4368 : * If we got a RETURNING result, return it to caller. We'll continue
4369 : * the work on next call.
4370 : */
4371 13377336 : if (slot)
4372 6940 : return slot;
4373 : }
4374 :
4375 : /*
4376 : * Insert remaining tuples for batch insert.
4377 : */
4378 115458 : if (estate->es_insert_pending_result_relations != NIL)
4379 24 : ExecPendingInserts(estate);
4380 :
4381 : /*
4382 : * We're done, but fire AFTER STATEMENT triggers before exiting.
4383 : */
4384 115458 : fireASTriggers(node);
4385 :
4386 115458 : node->mt_done = true;
4387 :
4388 115458 : return NULL;
4389 : }
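     : /*
     :  * Illustrative note on the junk columns consumed above (hypothetical
     :  * statement): for
     :  *
     :  *   UPDATE tgt SET val = 0 WHERE id = 1;
     :  *
     :  * the subplan emits a "ctid" junk column when tgt is an ordinary
     :  * table, a "wholerow" junk column when it is a view updated through
     :  * INSTEAD OF triggers, and additionally a "tableoid" junk column when
     :  * several result relations are involved (inheritance or partitioning),
     :  * which is what drives ExecLookupResultRelByOid().
     :  */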
4390 :
4391 : /*
4392 : * ExecLookupResultRelByOid
4393 : * If the table with given OID is among the result relations to be
4394 : * updated by the given ModifyTable node, return its ResultRelInfo.
4395 : *
4396 : * If not found, return NULL if missing_ok, else raise error.
4397 : *
4398 : * If update_cache is true, then upon successful lookup, update the node's
4399 : * one-element cache. ONLY ExecModifyTable may pass true for this.
4400 : */
4401 : ResultRelInfo *
4402 11822 : ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid,
4403 : bool missing_ok, bool update_cache)
4404 : {
4405 11822 : if (node->mt_resultOidHash)
4406 : {
4407 : /* Use the pre-built hash table to locate the rel */
4408 : MTTargetRelLookup *mtlookup;
4409 :
4410 : mtlookup = (MTTargetRelLookup *)
4411 0 : hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
4412 0 : if (mtlookup)
4413 : {
4414 0 : if (update_cache)
4415 : {
4416 0 : node->mt_lastResultOid = resultoid;
4417 0 : node->mt_lastResultIndex = mtlookup->relationIndex;
4418 : }
4419 0 : return node->resultRelInfo + mtlookup->relationIndex;
4420 : }
4421 : }
4422 : else
4423 : {
4424 : /* With few target rels, just search the ResultRelInfo array */
4425 22540 : for (int ndx = 0; ndx < node->mt_nrels; ndx++)
4426 : {
4427 14342 : ResultRelInfo *rInfo = node->resultRelInfo + ndx;
4428 :
4429 14342 : if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
4430 : {
4431 3624 : if (update_cache)
4432 : {
4433 3142 : node->mt_lastResultOid = resultoid;
4434 3142 : node->mt_lastResultIndex = ndx;
4435 : }
4436 3624 : return rInfo;
4437 : }
4438 : }
4439 : }
4440 :
4441 8198 : if (!missing_ok)
4442 0 : elog(ERROR, "incorrect result relation OID %u", resultoid);
4443 8198 : return NULL;
4444 : }
4445 :
4446 : /* ----------------------------------------------------------------
4447 : * ExecInitModifyTable
4448 : * ----------------------------------------------------------------
4449 : */
4450 : ModifyTableState *
4451 119912 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
4452 : {
4453 : ModifyTableState *mtstate;
4454 119912 : Plan *subplan = outerPlan(node);
4455 119912 : CmdType operation = node->operation;
4456 119912 : int nrels = list_length(node->resultRelations);
4457 : ResultRelInfo *resultRelInfo;
4458 : List *arowmarks;
4459 : ListCell *l;
4460 : int i;
4461 : Relation rel;
4462 :
4463 : /* check for unsupported flags */
4464 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
4465 :
4466 : /*
4467 : * create state structure
4468 : */
4469 119912 : mtstate = makeNode(ModifyTableState);
4470 119912 : mtstate->ps.plan = (Plan *) node;
4471 119912 : mtstate->ps.state = estate;
4472 119912 : mtstate->ps.ExecProcNode = ExecModifyTable;
4473 :
4474 119912 : mtstate->operation = operation;
4475 119912 : mtstate->canSetTag = node->canSetTag;
4476 119912 : mtstate->mt_done = false;
4477 :
4478 119912 : mtstate->mt_nrels = nrels;
4479 119912 : mtstate->resultRelInfo = (ResultRelInfo *)
4480 119912 : palloc(nrels * sizeof(ResultRelInfo));
4481 :
4482 119912 : mtstate->mt_merge_pending_not_matched = NULL;
4483 119912 : mtstate->mt_merge_inserted = 0;
4484 119912 : mtstate->mt_merge_updated = 0;
4485 119912 : mtstate->mt_merge_deleted = 0;
4486 :
4487 : /*----------
4488 : * Resolve the target relation. This is the same as:
4489 : *
4490 : * - the relation for which we will fire FOR STATEMENT triggers,
4491 : * - the relation into whose tuple format all captured transition tuples
4492 : * must be converted, and
4493 : * - the root partitioned table used for tuple routing.
4494 : *
4495 : * If it's a partitioned or inherited table, the root partition or
4496 : * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
4497 : * given explicitly in node->rootRelation. Otherwise, the target relation
4498 : * is the sole relation in the node->resultRelations list.
4499 : *----------
4500 : */
4501 119912 : if (node->rootRelation > 0)
4502 : {
4503 2720 : mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
4504 2720 : ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
4505 : node->rootRelation);
4506 : }
4507 : else
4508 : {
4509 : Assert(list_length(node->resultRelations) == 1);
4510 117192 : mtstate->rootResultRelInfo = mtstate->resultRelInfo;
4511 117192 : ExecInitResultRelation(estate, mtstate->resultRelInfo,
4512 117192 : linitial_int(node->resultRelations));
4513 : }
4514 :
4515 : /* set up epqstate with dummy subplan data for the moment */
4516 119912 : EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
4517 : node->epqParam, node->resultRelations);
4518 119912 : mtstate->fireBSTriggers = true;
4519 :
4520 : /*
4521 : * Build state for collecting transition tuples. This requires having a
4522 : * valid trigger query context, so skip it in explain-only mode.
4523 : */
4524 119912 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
4525 118962 : ExecSetupTransitionCaptureState(mtstate, estate);
4526 :
4527 : /*
4528 : * Open all the result relations and initialize the ResultRelInfo structs.
4529 : * (But the root relation was initialized above, if it's part of the array.)
4530 : * We must do this before initializing the subplan, because direct-modify
4531 : * FDWs expect their ResultRelInfos to be available.
4532 : */
4533 119912 : resultRelInfo = mtstate->resultRelInfo;
4534 119912 : i = 0;
4535 241886 : foreach(l, node->resultRelations)
4536 : {
4537 122280 : Index resultRelation = lfirst_int(l);
4538 122280 : List *mergeActions = NIL;
4539 :
4540 122280 : if (node->mergeActionLists)
4541 1664 : mergeActions = list_nth(node->mergeActionLists, i);
4542 :
4543 122280 : if (resultRelInfo != mtstate->rootResultRelInfo)
4544 : {
4545 5088 : ExecInitResultRelation(estate, resultRelInfo, resultRelation);
4546 :
4547 : /*
4548 : * For child result relations, store the root result relation
4549 : * pointer. We do so for the convenience of places that want to
4550 : * look at the query's original target relation but don't have the
4551 : * mtstate handy.
4552 : */
4553 5088 : resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
4554 : }
4555 :
4556 : /* Initialize the usesFdwDirectModify flag */
4557 122280 : resultRelInfo->ri_usesFdwDirectModify =
4558 122280 : bms_is_member(i, node->fdwDirectModifyPlans);
4559 :
4560 : /*
4561 : * Verify result relation is a valid target for the current operation
4562 : */
4563 122280 : CheckValidResultRel(resultRelInfo, operation, mergeActions);
4564 :
4565 121974 : resultRelInfo++;
4566 121974 : i++;
4567 : }
4568 :
4569 : /*
4570 : * Now we may initialize the subplan.
4571 : */
4572 119606 : outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
4573 :
4574 : /*
4575 : * Do additional per-result-relation initialization.
4576 : */
4577 241546 : for (i = 0; i < nrels; i++)
4578 : {
4579 121940 : resultRelInfo = &mtstate->resultRelInfo[i];
4580 :
4581 : /* Let FDWs init themselves for foreign-table result rels */
4582 121940 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4583 121732 : resultRelInfo->ri_FdwRoutine != NULL &&
4584 322 : resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
4585 : {
4586 322 : List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
4587 :
4588 322 : resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
4589 : resultRelInfo,
4590 : fdw_private,
4591 : i,
4592 : eflags);
4593 : }
4594 :
4595 : /*
4596 : * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
4597 : * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
4598 : * tables, the FDW might have created additional junk attr(s), but
4599 : * those are no concern of ours.
4600 : */
4601 121940 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4602 : operation == CMD_MERGE)
4603 : {
4604 : char relkind;
4605 :
4606 29446 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4607 29446 : if (relkind == RELKIND_RELATION ||
4608 680 : relkind == RELKIND_MATVIEW ||
4609 : relkind == RELKIND_PARTITIONED_TABLE)
4610 : {
4611 28802 : resultRelInfo->ri_RowIdAttNo =
4612 28802 : ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
4613 28802 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4614 0 : elog(ERROR, "could not find junk ctid column");
4615 : }
4616 644 : else if (relkind == RELKIND_FOREIGN_TABLE)
4617 : {
4618 : /*
4619 : * We don't support MERGE with foreign tables for now. (It's
4620 : * problematic because the implementation uses CTID.)
4621 : */
4622 : Assert(operation != CMD_MERGE);
4623 :
4624 : /*
4625 : * When there is a row-level trigger, there should be a
4626 : * wholerow attribute. We also require it to be present in
4627 : * UPDATE and MERGE, so we can get the values of unchanged
4628 : * columns.
4629 : */
4630 356 : resultRelInfo->ri_RowIdAttNo =
4631 356 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4632 : "wholerow");
4633 356 : if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
4634 202 : !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4635 0 : elog(ERROR, "could not find junk wholerow column");
4636 : }
4637 : else
4638 : {
4639 : /* Other valid target relkinds must provide wholerow */
4640 288 : resultRelInfo->ri_RowIdAttNo =
4641 288 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4642 : "wholerow");
4643 288 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4644 0 : elog(ERROR, "could not find junk wholerow column");
4645 : }
4646 : }
4647 : }
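     : 	/*
     : 	 * At execution time the junk attribute located above is what identifies
     : 	 * the target row.  A simplified sketch of how ExecModifyTable consumes
     : 	 * it for an ordinary table (the real code also handles the wholerow and
     : 	 * foreign-table cases):
     : 	 *
     : 	 *     bool    isNull;
     : 	 *     Datum   datum;
     : 	 *
     : 	 *     datum = ExecGetJunkAttribute(planSlot,
     : 	 *                                  resultRelInfo->ri_RowIdAttNo,
     : 	 *                                  &isNull);
     : 	 *     tupleid = (ItemPointer) DatumGetPointer(datum);
     : 	 *
     : 	 * i.e. the resjunk "ctid" column carries the TID of the row to be
     : 	 * updated or deleted.
     : 	 */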
4648 :
4649 : /*
4650 : * If this is an inherited update/delete/merge, there will be a junk
4651 : * attribute named "tableoid" present in the subplan's targetlist. It
4652 : * will be used to identify the result relation for a given tuple to be
4653 : * updated/deleted/merged.
4654 : */
4655 119606 : mtstate->mt_resultOidAttno =
4656 119606 : ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
4657 : Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || nrels == 1);
4658 119606 : mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
4659 119606 : mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
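     : 	/*
     : 	 * For instance, an inherited UPDATE can emit tuples from several child
     : 	 * tables in one plan; each tuple's "tableoid" junk column says which
     : 	 * child it belongs to, and mt_lastResultOid/mt_lastResultIndex cache
     : 	 * the most recent lookup so consecutive tuples from the same child
     : 	 * skip the search.
     : 	 */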
4660 :
4661 : /* Get the root target relation */
4662 119606 : rel = mtstate->rootResultRelInfo->ri_RelationDesc;
4663 :
4664 : /*
4665 : * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
4666 : * or MERGE might need this too, but only if it actually moves tuples
4667 : * between partitions; in that case setup is done by
4668 : * ExecCrossPartitionUpdate.
4669 : */
4670 119606 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
4671 : operation == CMD_INSERT)
4672 5864 : mtstate->mt_partition_tuple_routing =
4673 5864 : ExecSetupPartitionTupleRouting(estate, rel);
4674 :
4675 : /*
4676 : * Initialize any WITH CHECK OPTION constraints if needed.
4677 : */
4678 119606 : resultRelInfo = mtstate->resultRelInfo;
4679 120940 : foreach(l, node->withCheckOptionLists)
4680 : {
4681 1334 : List *wcoList = (List *) lfirst(l);
4682 1334 : List *wcoExprs = NIL;
4683 : ListCell *ll;
4684 :
4685 3632 : foreach(ll, wcoList)
4686 : {
4687 2298 : WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
4688 2298 : ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
4689 : &mtstate->ps);
4690 :
4691 2298 : wcoExprs = lappend(wcoExprs, wcoExpr);
4692 : }
4693 :
4694 1334 : resultRelInfo->ri_WithCheckOptions = wcoList;
4695 1334 : resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
4696 1334 : resultRelInfo++;
4697 : }
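     : 	/*
     : 	 * These quals typically come from auto-updatable views declared WITH
     : 	 * [CASCADED | LOCAL] CHECK OPTION, or from row-level security
     : 	 * policies.  For example, given a view such as
     : 	 *
     : 	 *     CREATE VIEW positive_vals AS
     : 	 *         SELECT * FROM vals WHERE v > 0
     : 	 *         WITH CHECK OPTION;
     : 	 *
     : 	 * an INSERT through the view of a row with v <= 0 is rejected when the
     : 	 * expressions initialized here are evaluated against the new row.
     : 	 */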
4698 :
4699 : /*
4700 : * Initialize RETURNING projections if needed.
4701 : */
4702 119606 : if (node->returningLists)
4703 : {
4704 : TupleTableSlot *slot;
4705 : ExprContext *econtext;
4706 :
4707 : /*
4708 : * Initialize result tuple slot and assign its rowtype using the first
4709 : * RETURNING list. We assume the rest will look the same.
4710 : */
4711 4470 : mtstate->ps.plan->targetlist = (List *) linitial(node->returningLists);
4712 :
4713 : /* Set up a slot for the output of the RETURNING projection(s) */
4714 4470 : ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
4715 4470 : slot = mtstate->ps.ps_ResultTupleSlot;
4716 :
4717 : /* Need an econtext too */
4718 4470 : if (mtstate->ps.ps_ExprContext == NULL)
4719 4470 : ExecAssignExprContext(estate, &mtstate->ps);
4720 4470 : econtext = mtstate->ps.ps_ExprContext;
4721 :
4722 : /*
4723 : * Build a projection for each result rel.
4724 : */
4725 4470 : resultRelInfo = mtstate->resultRelInfo;
4726 9286 : foreach(l, node->returningLists)
4727 : {
4728 4816 : List *rlist = (List *) lfirst(l);
4729 :
4730 4816 : resultRelInfo->ri_returningList = rlist;
4731 4816 : resultRelInfo->ri_projectReturning =
4732 4816 : ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
4733 4816 : resultRelInfo->ri_RelationDesc->rd_att);
4734 4816 : resultRelInfo++;
4735 : }
4736 : }
4737 : else
4738 : {
4739 : /*
4740 : * We still must construct a dummy result tuple type, because InitPlan
4741 : * expects one (maybe should change that?).
4742 : */
4743 115136 : mtstate->ps.plan->targetlist = NIL;
4744 115136 : ExecInitResultTypeTL(&mtstate->ps);
4745 :
4746 115136 : mtstate->ps.ps_ExprContext = NULL;
4747 : }
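     : 	/*
     : 	 * For example, "INSERT INTO t ... RETURNING id" causes one projected
     : 	 * tuple to be emitted per inserted row.  A separate projection is
     : 	 * built per result relation because child tables may have different
     : 	 * column orders or dropped columns, even though every RETURNING list
     : 	 * produces the same output rowtype.
     : 	 */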
4748 :
4749 : /* Set the list of arbiter indexes if needed for ON CONFLICT */
4750 119606 : resultRelInfo = mtstate->resultRelInfo;
4751 119606 : if (node->onConflictAction != ONCONFLICT_NONE)
4752 : {
4753 : /* insert may only have one relation, inheritance is not expanded */
4754 : Assert(nrels == 1);
4755 1356 : resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
4756 : }
4757 :
4758 : /*
 4759                 :            :          * If needed, initialize the target list, projection and qual for ON CONFLICT
4760 : * DO UPDATE.
4761 : */
4762 119606 : if (node->onConflictAction == ONCONFLICT_UPDATE)
4763 : {
4764 912 : OnConflictSetState *onconfl = makeNode(OnConflictSetState);
4765 : ExprContext *econtext;
4766 : TupleDesc relationDesc;
4767 :
4768 : /* already exists if created by RETURNING processing above */
4769 912 : if (mtstate->ps.ps_ExprContext == NULL)
4770 632 : ExecAssignExprContext(estate, &mtstate->ps);
4771 :
4772 912 : econtext = mtstate->ps.ps_ExprContext;
4773 912 : relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
4774 :
4775 : /* create state for DO UPDATE SET operation */
4776 912 : resultRelInfo->ri_onConflict = onconfl;
4777 :
4778 : /* initialize slot for the existing tuple */
4779 912 : onconfl->oc_Existing =
4780 912 : table_slot_create(resultRelInfo->ri_RelationDesc,
4781 912 : &mtstate->ps.state->es_tupleTable);
4782 :
4783 : /*
4784 : * Create the tuple slot for the UPDATE SET projection. We want a slot
4785 : * of the table's type here, because the slot will be used to insert
4786 : * into the table, and for RETURNING processing - which may access
4787 : * system attributes.
4788 : */
4789 912 : onconfl->oc_ProjSlot =
4790 912 : table_slot_create(resultRelInfo->ri_RelationDesc,
4791 912 : &mtstate->ps.state->es_tupleTable);
4792 :
4793 : /* build UPDATE SET projection state */
4794 912 : onconfl->oc_ProjInfo =
4795 912 : ExecBuildUpdateProjection(node->onConflictSet,
4796 : true,
4797 : node->onConflictCols,
4798 : relationDesc,
4799 : econtext,
4800 : onconfl->oc_ProjSlot,
4801 : &mtstate->ps);
4802 :
4803 : /* initialize state to evaluate the WHERE clause, if any */
4804 912 : if (node->onConflictWhere)
4805 : {
4806 : ExprState *qualexpr;
4807 :
4808 176 : qualexpr = ExecInitQual((List *) node->onConflictWhere,
4809 : &mtstate->ps);
4810 176 : onconfl->oc_WhereClause = qualexpr;
4811 : }
4812 : }
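     : 	/*
     : 	 * To illustrate the pieces set up above with a hypothetical statement:
     : 	 *
     : 	 *     INSERT INTO t (k, v) VALUES (1, 'x')
     : 	 *         ON CONFLICT (k) DO UPDATE SET v = EXCLUDED.v
     : 	 *         WHERE t.v IS DISTINCT FROM EXCLUDED.v;
     : 	 *
     : 	 * oc_Existing receives the already-stored conflicting row, oc_ProjSlot
     : 	 * receives the result of the SET projection, and oc_WhereClause holds
     : 	 * the optional WHERE condition evaluated before the update is applied.
     : 	 */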
4813 :
4814 : /*
4815 : * If we have any secondary relations in an UPDATE or DELETE, they need to
4816 : * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
4817 : * EvalPlanQual mechanism needs to be told about them. This also goes for
4818 : * the source relations in a MERGE. Locate the relevant ExecRowMarks.
4819 : */
4820 119606 : arowmarks = NIL;
4821 122312 : foreach(l, node->rowMarks)
4822 : {
4823 2706 : PlanRowMark *rc = lfirst_node(PlanRowMark, l);
4824 : ExecRowMark *erm;
4825 : ExecAuxRowMark *aerm;
4826 :
4827 : /* ignore "parent" rowmarks; they are irrelevant at runtime */
4828 2706 : if (rc->isParent)
4829 142 : continue;
4830 :
4831 : /* Find ExecRowMark and build ExecAuxRowMark */
4832 2564 : erm = ExecFindRowMark(estate, rc->rti, false);
4833 2564 : aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
4834 2564 : arowmarks = lappend(arowmarks, aerm);
4835 : }
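     : 	/*
     : 	 * For example, in "UPDATE t SET ... FROM s WHERE ...", the source
     : 	 * relation s is not locked but must be re-fetchable if EvalPlanQual
     : 	 * has to recheck a concurrently updated target row, so its rowmark is
     : 	 * collected here and handed to EvalPlanQualSetPlan below.
     : 	 */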
4836 :
4837 : /* For a MERGE command, initialize its state */
4838 119606 : if (mtstate->operation == CMD_MERGE)
4839 1432 : ExecInitMerge(mtstate, estate);
4840 :
4841 119606 : EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
4842 :
4843 : /*
4844 : * If there are a lot of result relations, use a hash table to speed the
4845 : * lookups. If there are not a lot, a simple linear search is faster.
4846 : *
4847 : * It's not clear where the threshold is, but try 64 for starters. In a
4848 : * debugging build, use a small threshold so that we get some test
4849 : * coverage of both code paths.
4850 : */
4851 : #ifdef USE_ASSERT_CHECKING
4852 : #define MT_NRELS_HASH 4
4853 : #else
4854 : #define MT_NRELS_HASH 64
4855 : #endif
4856 119606 : if (nrels >= MT_NRELS_HASH)
4857 : {
4858 : HASHCTL hash_ctl;
4859 :
4860 0 : hash_ctl.keysize = sizeof(Oid);
4861 0 : hash_ctl.entrysize = sizeof(MTTargetRelLookup);
4862 0 : hash_ctl.hcxt = CurrentMemoryContext;
4863 0 : mtstate->mt_resultOidHash =
4864 0 : hash_create("ModifyTable target hash",
4865 : nrels, &hash_ctl,
4866 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
4867 0 : for (i = 0; i < nrels; i++)
4868 : {
4869 : Oid hashkey;
4870 : MTTargetRelLookup *mtlookup;
4871 : bool found;
4872 :
4873 0 : resultRelInfo = &mtstate->resultRelInfo[i];
4874 0 : hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
4875 : mtlookup = (MTTargetRelLookup *)
4876 0 : hash_search(mtstate->mt_resultOidHash, &hashkey,
4877 : HASH_ENTER, &found);
4878 : Assert(!found);
4879 0 : mtlookup->relationIndex = i;
4880 : }
4881 : }
4882 : else
4883 119606 : mtstate->mt_resultOidHash = NULL;
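     : 	/*
     : 	 * Either way, per-tuple lookups go through ExecLookupResultRelByOid():
     : 	 * with the hash table it is a single probe on the tableoid value;
     : 	 * without it, a linear scan of resultRelInfo[], with
     : 	 * mt_lastResultOid/mt_lastResultIndex short-circuiting repeated
     : 	 * lookups of the same relation.
     : 	 */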
4884 :
4885 : /*
4886 : * Determine if the FDW supports batch insert and determine the batch size
 4887                 :            :          * (an FDW may support batching, but it may be disabled for the
4888 : * server/table).
4889 : *
4890 : * We only do this for INSERT, so that for UPDATE/DELETE the batch size
4891 : * remains set to 0.
4892 : */
4893 119606 : if (operation == CMD_INSERT)
4894 : {
4895 : /* insert may only have one relation, inheritance is not expanded */
4896 : Assert(nrels == 1);
4897 92494 : resultRelInfo = mtstate->resultRelInfo;
4898 92494 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4899 92494 : resultRelInfo->ri_FdwRoutine != NULL &&
4900 174 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
4901 174 : resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
4902 : {
4903 174 : resultRelInfo->ri_BatchSize =
4904 174 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
4905 174 : Assert(resultRelInfo->ri_BatchSize >= 1);
4906 : }
4907 : else
4908 92320 : resultRelInfo->ri_BatchSize = 1;
4909 : }
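     : 	/*
     : 	 * A foreign data wrapper opts into batching by providing both callbacks
     : 	 * tested above.  A minimal, hypothetical GetForeignModifyBatchSize
     : 	 * could look roughly like this (real FDWs such as postgres_fdw consult
     : 	 * the batch_size option of the foreign server or table instead of
     : 	 * returning a constant):
     : 	 *
     : 	 *     static int
     : 	 *     myGetForeignModifyBatchSize(ResultRelInfo *rinfo)
     : 	 *     {
     : 	 *         return 100;
     : 	 *     }
     : 	 *
     : 	 * Returning 100 allows up to 100 rows to be accumulated before
     : 	 * ExecForeignBatchInsert is called; returning 1 effectively disables
     : 	 * batching for the relation.
     : 	 */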
4910 :
4911 : /*
4912 : * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
4913 : * to estate->es_auxmodifytables so that it will be run to completion by
4914 : * ExecPostprocessPlan. (It'd actually work fine to add the primary
4915 : * ModifyTable node too, but there's no need.) Note the use of lcons not
4916 : * lappend: we need later-initialized ModifyTable nodes to be shut down
4917 : * before earlier ones. This ensures that we don't throw away RETURNING
4918 : * rows that need to be seen by a later CTE subplan.
4919 : */
4920 119606 : if (!mtstate->canSetTag)
4921 924 : estate->es_auxmodifytables = lcons(mtstate,
4922 : estate->es_auxmodifytables);
4923 :
4924 119606 : return mtstate;
4925 : }
4926 :
4927 : /* ----------------------------------------------------------------
4928 : * ExecEndModifyTable
4929 : *
4930 : * Shuts down the plan.
4931 : *
4932 : * Returns nothing of interest.
4933 : * ----------------------------------------------------------------
4934 : */
4935 : void
4936 115336 : ExecEndModifyTable(ModifyTableState *node)
4937 : {
4938 : int i;
4939 :
4940 : /*
4941 : * Allow any FDWs to shut down
4942 : */
4943 232704 : for (i = 0; i < node->mt_nrels; i++)
4944 : {
4945 : int j;
4946 117368 : ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
4947 :
4948 117368 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4949 117176 : resultRelInfo->ri_FdwRoutine != NULL &&
4950 302 : resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
4951 302 : resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
4952 : resultRelInfo);
4953 :
4954 : /*
4955 : * Cleanup the initialized batch slots. This only matters for FDWs
 4956                 :            :          * Clean up the initialized batch slots. This only matters for FDWs
4957 : * == 0.
4958 : */
4959 117424 : for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
4960 : {
4961 56 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
4962 56 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
4963 : }
4964 : }
4965 :
4966 : /*
4967 : * Close all the partitioned tables, leaf partitions, and their indices
4968 : * and release the slot used for tuple routing, if set.
4969 : */
4970 115336 : if (node->mt_partition_tuple_routing)
4971 : {
4972 5906 : ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
4973 :
4974 5906 : if (node->mt_root_tuple_slot)
4975 596 : ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
4976 : }
4977 :
4978 : /*
4979 : * Terminate EPQ execution if active
4980 : */
4981 115336 : EvalPlanQualEnd(&node->mt_epqstate);
4982 :
4983 : /*
4984 : * shut down subplan
4985 : */
4986 115336 : ExecEndNode(outerPlanState(node));
4987 115336 : }
4988 :
4989 : void
4990 0 : ExecReScanModifyTable(ModifyTableState *node)
4991 : {
4992 : /*
4993 : * Currently, we don't need to support rescan on ModifyTable nodes. The
4994 : * semantics of that would be a bit debatable anyway.
4995 : */
4996 0 : elog(ERROR, "ExecReScanModifyTable is not implemented");
4997 : }