Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * nodeModifyTable.c
4 : * routines to handle ModifyTable nodes.
5 : *
6 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : *
10 : * IDENTIFICATION
11 : * src/backend/executor/nodeModifyTable.c
12 : *
13 : *-------------------------------------------------------------------------
14 : */
15 : /* INTERFACE ROUTINES
16 : * ExecInitModifyTable - initialize the ModifyTable node
17 : * ExecModifyTable - retrieve the next tuple from the node
18 : * ExecEndModifyTable - shut down the ModifyTable node
19 : * ExecReScanModifyTable - rescan the ModifyTable node
20 : *
21 : * NOTES
22 : * The ModifyTable node receives input from its outerPlan, which is
23 : * the data to insert for INSERT cases, the changed columns' new
24 : * values plus row-locating info for UPDATE and MERGE cases, or just the
25 : * row-locating info for DELETE cases.
26 : *
27 : * The relation to modify can be an ordinary table, a foreign table, or a
28 : * view. If it's a view, either it has sufficient INSTEAD OF triggers or
29 : * this node executes only MERGE ... DO NOTHING. If the original MERGE
30 : * targeted a view not in one of those two categories, earlier processing
31 : * already pointed the ModifyTable result relation to an underlying
32 : * relation of that other view. This node does process
33 : * ri_WithCheckOptions, which may have expressions from those other,
34 : * automatically updatable views.
35 : *
36 : * MERGE runs a join between the source relation and the target table.
37 : * If any WHEN NOT MATCHED [BY TARGET] clauses are present, then the join
38 : * is an outer join that might output tuples without a matching target
39 : * tuple. In this case, any unmatched target tuples will have NULL
40 : * row-locating info, and only INSERT can be run. But for matched target
41 : * tuples, the row-locating info is used to determine the tuple to UPDATE
42 : * or DELETE. When all clauses are WHEN MATCHED or WHEN NOT MATCHED BY
43 : * SOURCE, all tuples produced by the join will include a matching target
44 : * tuple, so all tuples contain row-locating info.
45 : *
46 : * If the query specifies RETURNING, then the ModifyTable returns a
47 : * RETURNING tuple after completing each row insert, update, or delete.
48 : * It must be called again to continue the operation. Without RETURNING,
49 : * we just loop within the node until all the work is done, then
50 : * return NULL. This avoids useless call/return overhead.
51 : */
52 :
53 : #include "postgres.h"
54 :
55 : #include "access/htup_details.h"
56 : #include "access/tableam.h"
57 : #include "access/xact.h"
58 : #include "commands/trigger.h"
59 : #include "executor/execPartition.h"
60 : #include "executor/executor.h"
61 : #include "executor/nodeModifyTable.h"
62 : #include "foreign/fdwapi.h"
63 : #include "miscadmin.h"
64 : #include "nodes/nodeFuncs.h"
65 : #include "optimizer/optimizer.h"
66 : #include "rewrite/rewriteHandler.h"
67 : #include "rewrite/rewriteManip.h"
68 : #include "storage/lmgr.h"
69 : #include "utils/builtins.h"
70 : #include "utils/datum.h"
71 : #include "utils/rel.h"
72 : #include "utils/snapmgr.h"
73 :
74 :
75 : typedef struct MTTargetRelLookup
76 : {
77 : Oid relationOid; /* hash key, must be first */
78 : int relationIndex; /* rel's index in resultRelInfo[] array */
79 : } MTTargetRelLookup;
80 :
81 : /*
82 : * Context struct for a ModifyTable operation, containing basic execution
83 : * state and some output variables populated by ExecUpdateAct() and
84 : * ExecDeleteAct() to report the result of their actions to callers.
85 : */
86 : typedef struct ModifyTableContext
87 : {
88 : /* Operation state */
89 : ModifyTableState *mtstate;
90 : EPQState *epqstate;
91 : EState *estate;
92 :
93 : /*
94 : * Slot containing tuple obtained from ModifyTable's subplan. Used to
95 : * access "junk" columns that are not going to be stored.
96 : */
97 : TupleTableSlot *planSlot;
98 :
99 : /*
100 : * Information about the changes that were made concurrently to a tuple
101 : * being updated or deleted
102 : */
103 : TM_FailureData tmfd;
104 :
105 : /*
106 : * The tuple deleted when doing a cross-partition UPDATE with a RETURNING
107 : * clause that refers to OLD columns (converted to the root's tuple
108 : * descriptor).
109 : */
110 : TupleTableSlot *cpDeletedSlot;
111 :
112 : /*
113 : * The tuple projected by the INSERT's RETURNING clause, when doing a
114 : * cross-partition UPDATE
115 : */
116 : TupleTableSlot *cpUpdateReturningSlot;
117 : } ModifyTableContext;
118 :
119 : /*
120 : * Context struct containing output data specific to UPDATE operations.
121 : */
122 : typedef struct UpdateContext
123 : {
124 : bool crossPartUpdate; /* was it a cross-partition update? */
125 : TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
126 :
127 : /*
128 : * Lock mode to acquire on the latest tuple version before performing
129 : * EvalPlanQual on it
130 : */
131 : LockTupleMode lockmode;
132 : } UpdateContext;
133 :
134 :
135 : static void ExecBatchInsert(ModifyTableState *mtstate,
136 : ResultRelInfo *resultRelInfo,
137 : TupleTableSlot **slots,
138 : TupleTableSlot **planSlots,
139 : int numSlots,
140 : EState *estate,
141 : bool canSetTag);
142 : static void ExecPendingInserts(EState *estate);
143 : static void ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
144 : ResultRelInfo *sourcePartInfo,
145 : ResultRelInfo *destPartInfo,
146 : ItemPointer tupleid,
147 : TupleTableSlot *oldslot,
148 : TupleTableSlot *newslot);
149 : static bool ExecOnConflictUpdate(ModifyTableContext *context,
150 : ResultRelInfo *resultRelInfo,
151 : ItemPointer conflictTid,
152 : TupleTableSlot *excludedSlot,
153 : bool canSetTag,
154 : TupleTableSlot **returning);
155 : static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
156 : EState *estate,
157 : PartitionTupleRouting *proute,
158 : ResultRelInfo *targetRelInfo,
159 : TupleTableSlot *slot,
160 : ResultRelInfo **partRelInfo);
161 :
162 : static TupleTableSlot *ExecMerge(ModifyTableContext *context,
163 : ResultRelInfo *resultRelInfo,
164 : ItemPointer tupleid,
165 : HeapTuple oldtuple,
166 : bool canSetTag);
167 : static void ExecInitMerge(ModifyTableState *mtstate, EState *estate);
168 : static TupleTableSlot *ExecMergeMatched(ModifyTableContext *context,
169 : ResultRelInfo *resultRelInfo,
170 : ItemPointer tupleid,
171 : HeapTuple oldtuple,
172 : bool canSetTag,
173 : bool *matched);
174 : static TupleTableSlot *ExecMergeNotMatched(ModifyTableContext *context,
175 : ResultRelInfo *resultRelInfo,
176 : bool canSetTag);
177 :
178 :
179 : /*
180 : * Verify that the tuples to be produced by INSERT match the
181 : * target relation's rowtype
182 : *
183 : * We do this to guard against stale plans. If plan invalidation is
184 : * functioning properly then we should never get a failure here, but better
185 : * safe than sorry. Note that this is called after we have obtained lock
186 : * on the target rel, so the rowtype can't change underneath us.
187 : *
188 : * The plan output is represented by its targetlist, because that makes
189 : * handling the dropped-column case easier.
190 : *
191 : * We used to use this for UPDATE as well, but now the equivalent checks
192 : * are done in ExecBuildUpdateProjection.
193 : */
194 : static void
195 92120 : ExecCheckPlanOutput(Relation resultRel, List *targetList)
196 : {
197 92120 : TupleDesc resultDesc = RelationGetDescr(resultRel);
198 92120 : int attno = 0;
199 : ListCell *lc;
200 :
201 285802 : foreach(lc, targetList)
202 : {
203 193682 : TargetEntry *tle = (TargetEntry *) lfirst(lc);
204 : Form_pg_attribute attr;
205 :
206 : Assert(!tle->resjunk); /* caller removed junk items already */
207 :
208 193682 : if (attno >= resultDesc->natts)
209 0 : ereport(ERROR,
210 : (errcode(ERRCODE_DATATYPE_MISMATCH),
211 : errmsg("table row type and query-specified row type do not match"),
212 : errdetail("Query has too many columns.")));
213 193682 : attr = TupleDescAttr(resultDesc, attno);
214 193682 : attno++;
215 :
216 : /*
217 : * Special cases here should match planner's expand_insert_targetlist.
218 : */
219 193682 : if (attr->attisdropped)
220 : {
221 : /*
222 : * For a dropped column, we can't check atttypid (it's likely 0).
223 : * In any case the planner has most likely inserted an INT4 null.
224 : * What we insist on is just *some* NULL constant.
225 : */
226 628 : if (!IsA(tle->expr, Const) ||
227 628 : !((Const *) tle->expr)->constisnull)
228 0 : ereport(ERROR,
229 : (errcode(ERRCODE_DATATYPE_MISMATCH),
230 : errmsg("table row type and query-specified row type do not match"),
231 : errdetail("Query provides a value for a dropped column at ordinal position %d.",
232 : attno)));
233 : }
234 193054 : else if (attr->attgenerated)
235 : {
236 : /*
237 : * For a generated column, the planner will have inserted a null
238 : * of the column's base type (to avoid possibly failing on domain
239 : * not-null constraints). It doesn't seem worth insisting on that
240 : * exact type though, since a null value is type-independent. As
241 : * above, just insist on *some* NULL constant.
242 : */
243 1130 : if (!IsA(tle->expr, Const) ||
244 1130 : !((Const *) tle->expr)->constisnull)
245 0 : ereport(ERROR,
246 : (errcode(ERRCODE_DATATYPE_MISMATCH),
247 : errmsg("table row type and query-specified row type do not match"),
248 : errdetail("Query provides a value for a generated column at ordinal position %d.",
249 : attno)));
250 : }
251 : else
252 : {
253 : /* Normal case: demand type match */
254 191924 : if (exprType((Node *) tle->expr) != attr->atttypid)
255 0 : ereport(ERROR,
256 : (errcode(ERRCODE_DATATYPE_MISMATCH),
257 : errmsg("table row type and query-specified row type do not match"),
258 : errdetail("Table has type %s at ordinal position %d, but query expects %s.",
259 : format_type_be(attr->atttypid),
260 : attno,
261 : format_type_be(exprType((Node *) tle->expr)))));
262 : }
263 : }
264 92120 : if (attno != resultDesc->natts)
265 0 : ereport(ERROR,
266 : (errcode(ERRCODE_DATATYPE_MISMATCH),
267 : errmsg("table row type and query-specified row type do not match"),
268 : errdetail("Query has too few columns.")));
269 92120 : }
270 :
271 : /*
272 : * ExecProcessReturning --- evaluate a RETURNING list
273 : *
274 : * context: context for the ModifyTable operation
275 : * resultRelInfo: current result rel
276 : * cmdType: operation/merge action performed (INSERT, UPDATE, or DELETE)
277 : * oldSlot: slot holding old tuple deleted or updated
278 : * newSlot: slot holding new tuple inserted or updated
279 : * planSlot: slot holding tuple returned by top subplan node
280 : *
281 : * Note: If oldSlot and newSlot are NULL, the FDW should have already provided
282 : * econtext's scan tuple and its old & new tuples are not needed (FDW direct-
283 : * modify is disabled if the RETURNING list refers to any OLD/NEW values).
284 : *
285 : * Returns a slot holding the result tuple
286 : */
287 : static TupleTableSlot *
288 8120 : ExecProcessReturning(ModifyTableContext *context,
289 : ResultRelInfo *resultRelInfo,
290 : CmdType cmdType,
291 : TupleTableSlot *oldSlot,
292 : TupleTableSlot *newSlot,
293 : TupleTableSlot *planSlot)
294 : {
295 8120 : EState *estate = context->estate;
296 8120 : ProjectionInfo *projectReturning = resultRelInfo->ri_projectReturning;
297 8120 : ExprContext *econtext = projectReturning->pi_exprContext;
298 :
299 : /* Make tuple and any needed join variables available to ExecProject */
300 8120 : switch (cmdType)
301 : {
302 6678 : case CMD_INSERT:
303 : case CMD_UPDATE:
304 : /* return new tuple by default */
305 6678 : if (newSlot)
306 6222 : econtext->ecxt_scantuple = newSlot;
307 6678 : break;
308 :
309 1442 : case CMD_DELETE:
310 : /* return old tuple by default */
311 1442 : if (oldSlot)
312 1204 : econtext->ecxt_scantuple = oldSlot;
313 1442 : break;
314 :
315 0 : default:
316 0 : elog(ERROR, "unrecognized commandType: %d", (int) cmdType);
317 : }
318 8120 : econtext->ecxt_outertuple = planSlot;
319 :
320 : /* Make old/new tuples available to ExecProject, if required */
321 8120 : if (oldSlot)
322 3844 : econtext->ecxt_oldtuple = oldSlot;
323 4276 : else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
324 184 : econtext->ecxt_oldtuple = ExecGetAllNullSlot(estate, resultRelInfo);
325 : else
326 4092 : econtext->ecxt_oldtuple = NULL; /* No references to OLD columns */
327 :
328 8120 : if (newSlot)
329 6222 : econtext->ecxt_newtuple = newSlot;
330 1898 : else if (projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW)
331 132 : econtext->ecxt_newtuple = ExecGetAllNullSlot(estate, resultRelInfo);
332 : else
333 1766 : econtext->ecxt_newtuple = NULL; /* No references to NEW columns */
334 :
335 : /*
336 : * Tell ExecProject whether or not the OLD/NEW rows actually exist. This
337 : * information is required to evaluate ReturningExpr nodes and also in
338 : * ExecEvalSysVar() and ExecEvalWholeRowVar().
339 : */
340 8120 : if (oldSlot == NULL)
341 4276 : projectReturning->pi_state.flags |= EEO_FLAG_OLD_IS_NULL;
342 : else
343 3844 : projectReturning->pi_state.flags &= ~EEO_FLAG_OLD_IS_NULL;
344 :
345 8120 : if (newSlot == NULL)
346 1898 : projectReturning->pi_state.flags |= EEO_FLAG_NEW_IS_NULL;
347 : else
348 6222 : projectReturning->pi_state.flags &= ~EEO_FLAG_NEW_IS_NULL;
349 :
350 : /* Compute the RETURNING expressions */
351 8120 : return ExecProject(projectReturning);
352 : }
353 :
354 : /*
355 : * ExecCheckTupleVisible -- verify tuple is visible
356 : *
357 : * It would not be consistent with guarantees of the higher isolation levels to
358 : * proceed with avoiding insertion (taking speculative insertion's alternative
359 : * path) on the basis of another tuple that is not visible to MVCC snapshot.
360 : * Check for the need to raise a serialization failure, and do so as necessary.
361 : */
362 : static void
363 5256 : ExecCheckTupleVisible(EState *estate,
364 : Relation rel,
365 : TupleTableSlot *slot)
366 : {
367 5256 : if (!IsolationUsesXactSnapshot())
368 5192 : return;
369 :
370 64 : if (!table_tuple_satisfies_snapshot(rel, slot, estate->es_snapshot))
371 : {
372 : Datum xminDatum;
373 : TransactionId xmin;
374 : bool isnull;
375 :
376 40 : xminDatum = slot_getsysattr(slot, MinTransactionIdAttributeNumber, &isnull);
377 : Assert(!isnull);
378 40 : xmin = DatumGetTransactionId(xminDatum);
379 :
380 : /*
381 : * We should not raise a serialization failure if the conflict is
382 : * against a tuple inserted by our own transaction, even if it's not
383 : * visible to our snapshot. (This would happen, for example, if
384 : * conflicting keys are proposed for insertion in a single command.)
385 : */
386 40 : if (!TransactionIdIsCurrentTransactionId(xmin))
387 20 : ereport(ERROR,
388 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
389 : errmsg("could not serialize access due to concurrent update")));
390 : }
391 : }
392 :
393 : /*
394 : * ExecCheckTIDVisible -- convenience variant of ExecCheckTupleVisible()
395 : */
396 : static void
397 224 : ExecCheckTIDVisible(EState *estate,
398 : ResultRelInfo *relinfo,
399 : ItemPointer tid,
400 : TupleTableSlot *tempSlot)
401 : {
402 224 : Relation rel = relinfo->ri_RelationDesc;
403 :
404 : /* Redundantly check isolation level */
405 224 : if (!IsolationUsesXactSnapshot())
406 160 : return;
407 :
408 64 : if (!table_tuple_fetch_row_version(rel, tid, SnapshotAny, tempSlot))
409 0 : elog(ERROR, "failed to fetch conflicting tuple for ON CONFLICT");
410 64 : ExecCheckTupleVisible(estate, rel, tempSlot);
411 44 : ExecClearTuple(tempSlot);
412 : }
413 :
414 : /*
415 : * Initialize generated columns handling for a tuple
416 : *
417 : * This fills the resultRelInfo's ri_GeneratedExprsI/ri_NumGeneratedNeededI or
418 : * ri_GeneratedExprsU/ri_NumGeneratedNeededU fields, depending on cmdtype.
419 : * This is used only for stored generated columns.
420 : *
421 : * If cmdType == CMD_UPDATE, the ri_extraUpdatedCols field is filled too.
422 : * This is used by both stored and virtual generated columns.
423 : *
424 : * Note: usually, a given query would need only one of ri_GeneratedExprsI and
425 : * ri_GeneratedExprsU per result rel; but MERGE can need both, and so can
426 : * cross-partition UPDATEs, since a partition might be the target of both
427 : * UPDATE and INSERT actions.
428 : */
429 : void
430 59694 : ExecInitGenerated(ResultRelInfo *resultRelInfo,
431 : EState *estate,
432 : CmdType cmdtype)
433 : {
434 59694 : Relation rel = resultRelInfo->ri_RelationDesc;
435 59694 : TupleDesc tupdesc = RelationGetDescr(rel);
436 59694 : int natts = tupdesc->natts;
437 : ExprState **ri_GeneratedExprs;
438 : int ri_NumGeneratedNeeded;
439 : Bitmapset *updatedCols;
440 : MemoryContext oldContext;
441 :
442 : /* Nothing to do if no generated columns */
443 59694 : if (!(tupdesc->constr && (tupdesc->constr->has_generated_stored || tupdesc->constr->has_generated_virtual)))
444 58614 : return;
445 :
446 : /*
447 : * In an UPDATE, we can skip computing any generated columns that do not
448 : * depend on any UPDATE target column. But if there is a BEFORE ROW
449 : * UPDATE trigger, we cannot skip because the trigger might change more
450 : * columns.
451 : */
452 1080 : if (cmdtype == CMD_UPDATE &&
453 266 : !(rel->trigdesc && rel->trigdesc->trig_update_before_row))
454 222 : updatedCols = ExecGetUpdatedCols(resultRelInfo, estate);
455 : else
456 858 : updatedCols = NULL;
457 :
458 : /*
459 : * Make sure these data structures are built in the per-query memory
460 : * context so they'll survive throughout the query.
461 : */
462 1080 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
463 :
464 1080 : ri_GeneratedExprs = (ExprState **) palloc0(natts * sizeof(ExprState *));
465 1080 : ri_NumGeneratedNeeded = 0;
466 :
467 4208 : for (int i = 0; i < natts; i++)
468 : {
469 3134 : char attgenerated = TupleDescAttr(tupdesc, i)->attgenerated;
470 :
471 3134 : if (attgenerated)
472 : {
473 : Expr *expr;
474 :
475 : /* Fetch the GENERATED AS expression tree */
476 1164 : expr = (Expr *) build_column_default(rel, i + 1);
477 1164 : if (expr == NULL)
478 0 : elog(ERROR, "no generation expression found for column number %d of table \"%s\"",
479 : i + 1, RelationGetRelationName(rel));
480 :
481 : /*
482 : * If it's an update with a known set of update target columns,
483 : * see if we can skip the computation.
484 : */
485 1164 : if (updatedCols)
486 : {
487 236 : Bitmapset *attrs_used = NULL;
488 :
489 236 : pull_varattnos((Node *) expr, 1, &attrs_used);
490 :
491 236 : if (!bms_overlap(updatedCols, attrs_used))
492 24 : continue; /* need not update this column */
493 : }
494 :
495 : /* No luck, so prepare the expression for execution */
496 1140 : if (attgenerated == ATTRIBUTE_GENERATED_STORED)
497 : {
498 1056 : ri_GeneratedExprs[i] = ExecPrepareExpr(expr, estate);
499 1050 : ri_NumGeneratedNeeded++;
500 : }
501 :
502 : /* If UPDATE, mark column in resultRelInfo->ri_extraUpdatedCols */
503 1134 : if (cmdtype == CMD_UPDATE)
504 264 : resultRelInfo->ri_extraUpdatedCols =
505 264 : bms_add_member(resultRelInfo->ri_extraUpdatedCols,
506 : i + 1 - FirstLowInvalidHeapAttributeNumber);
507 : }
508 : }
509 :
510 1074 : if (ri_NumGeneratedNeeded == 0)
511 : {
512 : /* didn't need it after all */
513 42 : pfree(ri_GeneratedExprs);
514 42 : ri_GeneratedExprs = NULL;
515 : }
516 :
517 : /* Save in appropriate set of fields */
518 1074 : if (cmdtype == CMD_UPDATE)
519 : {
520 : /* Don't call twice */
521 : Assert(resultRelInfo->ri_GeneratedExprsU == NULL);
522 :
523 266 : resultRelInfo->ri_GeneratedExprsU = ri_GeneratedExprs;
524 266 : resultRelInfo->ri_NumGeneratedNeededU = ri_NumGeneratedNeeded;
525 :
526 266 : resultRelInfo->ri_extraUpdatedCols_valid = true;
527 : }
528 : else
529 : {
530 : /* Don't call twice */
531 : Assert(resultRelInfo->ri_GeneratedExprsI == NULL);
532 :
533 808 : resultRelInfo->ri_GeneratedExprsI = ri_GeneratedExprs;
534 808 : resultRelInfo->ri_NumGeneratedNeededI = ri_NumGeneratedNeeded;
535 : }
536 :
537 1074 : MemoryContextSwitchTo(oldContext);
538 : }
539 :
540 : /*
541 : * Compute stored generated columns for a tuple
542 : */
543 : void
544 1436 : ExecComputeStoredGenerated(ResultRelInfo *resultRelInfo,
545 : EState *estate, TupleTableSlot *slot,
546 : CmdType cmdtype)
547 : {
548 1436 : Relation rel = resultRelInfo->ri_RelationDesc;
549 1436 : TupleDesc tupdesc = RelationGetDescr(rel);
550 1436 : int natts = tupdesc->natts;
551 1436 : ExprContext *econtext = GetPerTupleExprContext(estate);
552 : ExprState **ri_GeneratedExprs;
553 : MemoryContext oldContext;
554 : Datum *values;
555 : bool *nulls;
556 :
557 : /* We should not be called unless this is true */
558 : Assert(tupdesc->constr && tupdesc->constr->has_generated_stored);
559 :
560 : /*
561 : * Initialize the expressions if we didn't already, and check whether we
562 : * can exit early because nothing needs to be computed.
563 : */
564 1436 : if (cmdtype == CMD_UPDATE)
565 : {
566 280 : if (resultRelInfo->ri_GeneratedExprsU == NULL)
567 216 : ExecInitGenerated(resultRelInfo, estate, cmdtype);
568 280 : if (resultRelInfo->ri_NumGeneratedNeededU == 0)
569 18 : return;
570 262 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsU;
571 : }
572 : else
573 : {
574 1156 : if (resultRelInfo->ri_GeneratedExprsI == NULL)
575 814 : ExecInitGenerated(resultRelInfo, estate, cmdtype);
576 : /* Early exit is impossible given the prior Assert */
577 : Assert(resultRelInfo->ri_NumGeneratedNeededI > 0);
578 1150 : ri_GeneratedExprs = resultRelInfo->ri_GeneratedExprsI;
579 : }
580 :
581 1412 : oldContext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
582 :
583 1412 : values = palloc(sizeof(*values) * natts);
584 1412 : nulls = palloc(sizeof(*nulls) * natts);
585 :
586 1412 : slot_getallattrs(slot);
587 1412 : memcpy(nulls, slot->tts_isnull, sizeof(*nulls) * natts);
588 :
589 5354 : for (int i = 0; i < natts; i++)
590 : {
591 3966 : CompactAttribute *attr = TupleDescCompactAttr(tupdesc, i);
592 :
593 3966 : if (ri_GeneratedExprs[i])
594 : {
595 : Datum val;
596 : bool isnull;
597 :
598 : Assert(TupleDescAttr(tupdesc, i)->attgenerated == ATTRIBUTE_GENERATED_STORED);
599 :
600 1434 : econtext->ecxt_scantuple = slot;
601 :
602 1434 : val = ExecEvalExpr(ri_GeneratedExprs[i], econtext, &isnull);
603 :
604 : /*
605 : * We must make a copy of val as we have no guarantees about where
606 : * memory for a pass-by-reference Datum is located.
607 : */
608 1410 : if (!isnull)
609 1362 : val = datumCopy(val, attr->attbyval, attr->attlen);
610 :
611 1410 : values[i] = val;
612 1410 : nulls[i] = isnull;
613 : }
614 : else
615 : {
616 2532 : if (!nulls[i])
617 2384 : values[i] = datumCopy(slot->tts_values[i], attr->attbyval, attr->attlen);
618 : }
619 : }
620 :
621 1388 : ExecClearTuple(slot);
622 1388 : memcpy(slot->tts_values, values, sizeof(*values) * natts);
623 1388 : memcpy(slot->tts_isnull, nulls, sizeof(*nulls) * natts);
624 1388 : ExecStoreVirtualTuple(slot);
625 1388 : ExecMaterializeSlot(slot);
626 :
627 1388 : MemoryContextSwitchTo(oldContext);
628 : }
629 :
630 : /*
631 : * ExecInitInsertProjection
632 : * Do one-time initialization of projection data for INSERT tuples.
633 : *
634 : * INSERT queries may need a projection to filter out junk attrs in the tlist.
635 : *
636 : * This is also a convenient place to verify that the
637 : * output of an INSERT matches the target table.
638 : */
639 : static void
640 91040 : ExecInitInsertProjection(ModifyTableState *mtstate,
641 : ResultRelInfo *resultRelInfo)
642 : {
643 91040 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
644 91040 : Plan *subplan = outerPlan(node);
645 91040 : EState *estate = mtstate->ps.state;
646 91040 : List *insertTargetList = NIL;
647 91040 : bool need_projection = false;
648 : ListCell *l;
649 :
650 : /* Extract non-junk columns of the subplan's result tlist. */
651 281916 : foreach(l, subplan->targetlist)
652 : {
653 190876 : TargetEntry *tle = (TargetEntry *) lfirst(l);
654 :
655 190876 : if (!tle->resjunk)
656 190876 : insertTargetList = lappend(insertTargetList, tle);
657 : else
658 0 : need_projection = true;
659 : }
660 :
661 : /*
662 : * The junk-free list must produce a tuple suitable for the result
663 : * relation.
664 : */
665 91040 : ExecCheckPlanOutput(resultRelInfo->ri_RelationDesc, insertTargetList);
666 :
667 : /* We'll need a slot matching the table's format. */
668 91040 : resultRelInfo->ri_newTupleSlot =
669 91040 : table_slot_create(resultRelInfo->ri_RelationDesc,
670 : &estate->es_tupleTable);
671 :
672 : /* Build ProjectionInfo if needed (it probably isn't). */
673 91040 : if (need_projection)
674 : {
675 0 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
676 :
677 : /* need an expression context to do the projection */
678 0 : if (mtstate->ps.ps_ExprContext == NULL)
679 0 : ExecAssignExprContext(estate, &mtstate->ps);
680 :
681 0 : resultRelInfo->ri_projectNew =
682 0 : ExecBuildProjectionInfo(insertTargetList,
683 : mtstate->ps.ps_ExprContext,
684 : resultRelInfo->ri_newTupleSlot,
685 : &mtstate->ps,
686 : relDesc);
687 : }
688 :
689 91040 : resultRelInfo->ri_projectNewInfoValid = true;
690 91040 : }
691 :
692 : /*
693 : * ExecInitUpdateProjection
694 : * Do one-time initialization of projection data for UPDATE tuples.
695 : *
696 : * UPDATE always needs a projection, because (1) there's always some junk
697 : * attrs, and (2) we may need to merge values of not-updated columns from
698 : * the old tuple into the final tuple. In UPDATE, the tuple arriving from
699 : * the subplan contains only new values for the changed columns, plus row
700 : * identity info in the junk attrs.
701 : *
702 : * This is "one-time" for any given result rel, but we might touch more than
703 : * one result rel in the course of an inherited UPDATE, and each one needs
704 : * its own projection due to possible column order variation.
705 : *
706 : * This is also a convenient place to verify that the output of an UPDATE
707 : * matches the target table (ExecBuildUpdateProjection does that).
708 : */
709 : static void
710 13530 : ExecInitUpdateProjection(ModifyTableState *mtstate,
711 : ResultRelInfo *resultRelInfo)
712 : {
713 13530 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
714 13530 : Plan *subplan = outerPlan(node);
715 13530 : EState *estate = mtstate->ps.state;
716 13530 : TupleDesc relDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
717 : int whichrel;
718 : List *updateColnos;
719 :
720 : /*
721 : * Usually, mt_lastResultIndex matches the target rel. If it happens not
722 : * to, we can get the index the hard way with an integer division.
723 : */
724 13530 : whichrel = mtstate->mt_lastResultIndex;
725 13530 : if (resultRelInfo != mtstate->resultRelInfo + whichrel)
726 : {
727 0 : whichrel = resultRelInfo - mtstate->resultRelInfo;
728 : Assert(whichrel >= 0 && whichrel < mtstate->mt_nrels);
729 : }
730 :
731 13530 : updateColnos = (List *) list_nth(mtstate->mt_updateColnosLists, whichrel);
732 :
733 : /*
734 : * For UPDATE, we use the old tuple to fill up missing values in the tuple
735 : * produced by the subplan to get the new tuple. We need two slots, both
736 : * matching the table's desired format.
737 : */
738 13530 : resultRelInfo->ri_oldTupleSlot =
739 13530 : table_slot_create(resultRelInfo->ri_RelationDesc,
740 : &estate->es_tupleTable);
741 13530 : resultRelInfo->ri_newTupleSlot =
742 13530 : table_slot_create(resultRelInfo->ri_RelationDesc,
743 : &estate->es_tupleTable);
744 :
745 : /* need an expression context to do the projection */
746 13530 : if (mtstate->ps.ps_ExprContext == NULL)
747 12132 : ExecAssignExprContext(estate, &mtstate->ps);
748 :
749 13530 : resultRelInfo->ri_projectNew =
750 13530 : ExecBuildUpdateProjection(subplan->targetlist,
751 : false, /* subplan did the evaluation */
752 : updateColnos,
753 : relDesc,
754 : mtstate->ps.ps_ExprContext,
755 : resultRelInfo->ri_newTupleSlot,
756 : &mtstate->ps);
757 :
758 13530 : resultRelInfo->ri_projectNewInfoValid = true;
759 13530 : }
760 :
761 : /*
762 : * ExecGetInsertNewTuple
763 : * This prepares a "new" tuple ready to be inserted into given result
764 : * relation, by removing any junk columns of the plan's output tuple
765 : * and (if necessary) coercing the tuple to the right tuple format.
766 : */
767 : static TupleTableSlot *
768 12267604 : ExecGetInsertNewTuple(ResultRelInfo *relinfo,
769 : TupleTableSlot *planSlot)
770 : {
771 12267604 : ProjectionInfo *newProj = relinfo->ri_projectNew;
772 : ExprContext *econtext;
773 :
774 : /*
775 : * If there's no projection to be done, just make sure the slot is of the
776 : * right type for the target rel. If the planSlot is the right type we
777 : * can use it as-is, else copy the data into ri_newTupleSlot.
778 : */
779 12267604 : if (newProj == NULL)
780 : {
781 12267604 : if (relinfo->ri_newTupleSlot->tts_ops != planSlot->tts_ops)
782 : {
783 11460158 : ExecCopySlot(relinfo->ri_newTupleSlot, planSlot);
784 11460158 : return relinfo->ri_newTupleSlot;
785 : }
786 : else
787 807446 : return planSlot;
788 : }
789 :
790 : /*
791 : * Else project; since the projection output slot is ri_newTupleSlot, this
792 : * will also fix any slot-type problem.
793 : *
794 : * Note: currently, this is dead code, because INSERT cases don't receive
795 : * any junk columns so there's never a projection to be done.
796 : */
797 0 : econtext = newProj->pi_exprContext;
798 0 : econtext->ecxt_outertuple = planSlot;
799 0 : return ExecProject(newProj);
800 : }
801 :
802 : /*
803 : * ExecGetUpdateNewTuple
804 : * This prepares a "new" tuple by combining an UPDATE subplan's output
805 : * tuple (which contains values of changed columns) with unchanged
806 : * columns taken from the old tuple.
807 : *
808 : * The subplan tuple might also contain junk columns, which are ignored.
809 : * Note that the projection also ensures we have a slot of the right type.
810 : */
811 : TupleTableSlot *
812 318000 : ExecGetUpdateNewTuple(ResultRelInfo *relinfo,
813 : TupleTableSlot *planSlot,
814 : TupleTableSlot *oldSlot)
815 : {
816 318000 : ProjectionInfo *newProj = relinfo->ri_projectNew;
817 : ExprContext *econtext;
818 :
819 : /* Use a few extra Asserts to protect against outside callers */
820 : Assert(relinfo->ri_projectNewInfoValid);
821 : Assert(planSlot != NULL && !TTS_EMPTY(planSlot));
822 : Assert(oldSlot != NULL && !TTS_EMPTY(oldSlot));
823 :
824 318000 : econtext = newProj->pi_exprContext;
825 318000 : econtext->ecxt_outertuple = planSlot;
826 318000 : econtext->ecxt_scantuple = oldSlot;
827 318000 : return ExecProject(newProj);
828 : }
829 :
830 : /* ----------------------------------------------------------------
831 : * ExecInsert
832 : *
833 : * For INSERT, we have to insert the tuple into the target relation
834 : * (or partition thereof) and insert appropriate tuples into the index
835 : * relations.
836 : *
837 : * slot contains the new tuple value to be stored.
838 : *
839 : * Returns RETURNING result if any, otherwise NULL.
840 : * *inserted_tuple is the tuple that's effectively inserted;
841 : * *insert_destrel is the relation where it was inserted.
842 : * These are only set on success.
843 : *
844 : * This may change the currently active tuple conversion map in
845 : * mtstate->mt_transition_capture, so the callers must take care to
846 : * save the previous value to avoid losing track of it.
847 : * ----------------------------------------------------------------
848 : */
849 : static TupleTableSlot *
850 12270416 : ExecInsert(ModifyTableContext *context,
851 : ResultRelInfo *resultRelInfo,
852 : TupleTableSlot *slot,
853 : bool canSetTag,
854 : TupleTableSlot **inserted_tuple,
855 : ResultRelInfo **insert_destrel)
856 : {
857 12270416 : ModifyTableState *mtstate = context->mtstate;
858 12270416 : EState *estate = context->estate;
859 : Relation resultRelationDesc;
860 12270416 : List *recheckIndexes = NIL;
861 12270416 : TupleTableSlot *planSlot = context->planSlot;
862 12270416 : TupleTableSlot *result = NULL;
863 : TransitionCaptureState *ar_insert_trig_tcs;
864 12270416 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
865 12270416 : OnConflictAction onconflict = node->onConflictAction;
866 12270416 : PartitionTupleRouting *proute = mtstate->mt_partition_tuple_routing;
867 : MemoryContext oldContext;
868 :
869 : /*
870 : * If the input result relation is a partitioned table, find the leaf
871 : * partition to insert the tuple into.
872 : */
873 12270416 : if (proute)
874 : {
875 : ResultRelInfo *partRelInfo;
876 :
877 758696 : slot = ExecPrepareTupleRouting(mtstate, estate, proute,
878 : resultRelInfo, slot,
879 : &partRelInfo);
880 758474 : resultRelInfo = partRelInfo;
881 : }
882 :
883 12270194 : ExecMaterializeSlot(slot);
884 :
885 12270194 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
886 :
887 : /*
888 : * Open the table's indexes, if we have not done so already, so that we
889 : * can add new index entries for the inserted tuple.
890 : */
891 12270194 : if (resultRelationDesc->rd_rel->relhasindex &&
892 3102404 : resultRelInfo->ri_IndexRelationDescs == NULL)
893 32128 : ExecOpenIndices(resultRelInfo, onconflict != ONCONFLICT_NONE);
894 :
895 : /*
896 : * BEFORE ROW INSERT Triggers.
897 : *
898 : * Note: We fire BEFORE ROW TRIGGERS for every attempted insertion in an
899 : * INSERT ... ON CONFLICT statement. We cannot check for constraint
900 : * violations before firing these triggers, because they can change the
901 : * values to insert. Also, they can run arbitrary user-defined code with
902 : * side-effects that we can't cancel by just not inserting the tuple.
903 : */
904 12270194 : if (resultRelInfo->ri_TrigDesc &&
905 75370 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)
906 : {
907 : /* Flush any pending inserts, so rows are visible to the triggers */
908 2132 : if (estate->es_insert_pending_result_relations != NIL)
909 6 : ExecPendingInserts(estate);
910 :
911 2132 : if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
912 200 : return NULL; /* "do nothing" */
913 : }
914 :
915 : /* INSTEAD OF ROW INSERT Triggers */
916 12269896 : if (resultRelInfo->ri_TrigDesc &&
917 75072 : resultRelInfo->ri_TrigDesc->trig_insert_instead_row)
918 : {
919 168 : if (!ExecIRInsertTriggers(estate, resultRelInfo, slot))
920 6 : return NULL; /* "do nothing" */
921 : }
922 12269728 : else if (resultRelInfo->ri_FdwRoutine)
923 : {
924 : /*
925 : * GENERATED expressions might reference the tableoid column, so
926 : * (re-)initialize tts_tableOid before evaluating them.
927 : */
928 2020 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
929 :
930 : /*
931 : * Compute stored generated columns
932 : */
933 2020 : if (resultRelationDesc->rd_att->constr &&
934 358 : resultRelationDesc->rd_att->constr->has_generated_stored)
935 8 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
936 : CMD_INSERT);
937 :
938 : /*
939 : * If the FDW supports batching, and batching is requested, accumulate
940 : * rows and insert them in batches. Otherwise use the per-row inserts.
941 : */
942 2020 : if (resultRelInfo->ri_BatchSize > 1)
943 : {
944 290 : bool flushed = false;
945 :
946 : /*
947 : * When we've reached the desired batch size, perform the
948 : * insertion.
949 : */
950 290 : if (resultRelInfo->ri_NumSlots == resultRelInfo->ri_BatchSize)
951 : {
952 20 : ExecBatchInsert(mtstate, resultRelInfo,
953 : resultRelInfo->ri_Slots,
954 : resultRelInfo->ri_PlanSlots,
955 : resultRelInfo->ri_NumSlots,
956 : estate, canSetTag);
957 20 : flushed = true;
958 : }
959 :
960 290 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
961 :
962 290 : if (resultRelInfo->ri_Slots == NULL)
963 : {
964 60 : resultRelInfo->ri_Slots = palloc(sizeof(TupleTableSlot *) *
965 30 : resultRelInfo->ri_BatchSize);
966 30 : resultRelInfo->ri_PlanSlots = palloc(sizeof(TupleTableSlot *) *
967 30 : resultRelInfo->ri_BatchSize);
968 : }
969 :
970 : /*
971 : * Initialize the batch slots. We don't know how many slots will
972 : * be needed, so we initialize them as the batch grows, and we
973 : * keep them across batches. To mitigate an inefficiency in how
974 : * resource owner handles objects with many references (as with
975 : * many slots all referencing the same tuple descriptor) we copy
976 : * the appropriate tuple descriptor for each slot.
977 : */
978 290 : if (resultRelInfo->ri_NumSlots >= resultRelInfo->ri_NumSlotsInitialized)
979 : {
980 144 : TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
981 : TupleDesc plan_tdesc =
982 144 : CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
983 :
984 288 : resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
985 144 : MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
986 :
987 288 : resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots] =
988 144 : MakeSingleTupleTableSlot(plan_tdesc, planSlot->tts_ops);
989 :
990 : /* remember how many batch slots we initialized */
991 144 : resultRelInfo->ri_NumSlotsInitialized++;
992 : }
993 :
994 290 : ExecCopySlot(resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots],
995 : slot);
996 :
997 290 : ExecCopySlot(resultRelInfo->ri_PlanSlots[resultRelInfo->ri_NumSlots],
998 : planSlot);
999 :
1000 : /*
1001 : * If these are the first tuples stored in the buffers, add the
1002 : * target rel and the mtstate to the
1003 : * es_insert_pending_result_relations and
1004 : * es_insert_pending_modifytables lists respectively, except in
1005 : * the case where flushing was done above, in which case they
1006 : * would already have been added to the lists, so no need to do
1007 : * this.
1008 : */
1009 290 : if (resultRelInfo->ri_NumSlots == 0 && !flushed)
1010 : {
1011 : Assert(!list_member_ptr(estate->es_insert_pending_result_relations,
1012 : resultRelInfo));
1013 38 : estate->es_insert_pending_result_relations =
1014 38 : lappend(estate->es_insert_pending_result_relations,
1015 : resultRelInfo);
1016 38 : estate->es_insert_pending_modifytables =
1017 38 : lappend(estate->es_insert_pending_modifytables, mtstate);
1018 : }
1019 : Assert(list_member_ptr(estate->es_insert_pending_result_relations,
1020 : resultRelInfo));
1021 :
1022 290 : resultRelInfo->ri_NumSlots++;
1023 :
1024 290 : MemoryContextSwitchTo(oldContext);
1025 :
1026 290 : return NULL;
1027 : }
1028 :
1029 : /*
1030 : * insert into foreign table: let the FDW do it
1031 : */
1032 1730 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignInsert(estate,
1033 : resultRelInfo,
1034 : slot,
1035 : planSlot);
1036 :
1037 1724 : if (slot == NULL) /* "do nothing" */
1038 4 : return NULL;
1039 :
1040 : /*
1041 : * AFTER ROW Triggers or RETURNING expressions might reference the
1042 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
1043 : * them. (This covers the case where the FDW replaced the slot.)
1044 : */
1045 1720 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1046 : }
1047 : else
1048 : {
1049 : WCOKind wco_kind;
1050 :
1051 : /*
1052 : * Constraints and GENERATED expressions might reference the tableoid
1053 : * column, so (re-)initialize tts_tableOid before evaluating them.
1054 : */
1055 12267708 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1056 :
1057 : /*
1058 : * Compute stored generated columns
1059 : */
1060 12267708 : if (resultRelationDesc->rd_att->constr &&
1061 3719972 : resultRelationDesc->rd_att->constr->has_generated_stored)
1062 1106 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
1063 : CMD_INSERT);
1064 :
1065 : /*
1066 : * Check any RLS WITH CHECK policies.
1067 : *
1068 : * Normally we should check INSERT policies. But if the insert is the
1069 : * result of a partition key update that moved the tuple to a new
1070 : * partition, we should instead check UPDATE policies, because we are
1071 : * executing policies defined on the target table, and not those
1072 : * defined on the child partitions.
1073 : *
1074 : * If we're running MERGE, we refer to the action that we're executing
1075 : * to know if we're doing an INSERT or UPDATE to a partition table.
1076 : */
1077 12267678 : if (mtstate->operation == CMD_UPDATE)
1078 792 : wco_kind = WCO_RLS_UPDATE_CHECK;
1079 12266886 : else if (mtstate->operation == CMD_MERGE)
1080 1780 : wco_kind = (mtstate->mt_merge_action->mas_action->commandType == CMD_UPDATE) ?
1081 1780 : WCO_RLS_UPDATE_CHECK : WCO_RLS_INSERT_CHECK;
1082 : else
1083 12265106 : wco_kind = WCO_RLS_INSERT_CHECK;
1084 :
1085 : /*
1086 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
1087 : * we are looking for at this point.
1088 : */
1089 12267678 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1090 666 : ExecWithCheckOptions(wco_kind, resultRelInfo, slot, estate);
1091 :
1092 : /*
1093 : * Check the constraints of the tuple.
1094 : */
1095 12267486 : if (resultRelationDesc->rd_att->constr)
1096 3719840 : ExecConstraints(resultRelInfo, slot, estate);
1097 :
1098 : /*
1099 : * Also check the tuple against the partition constraint, if there is
1100 : * one; except that if we got here via tuple-routing, we don't need to
1101 : * if there's no BR trigger defined on the partition.
1102 : */
1103 12266780 : if (resultRelationDesc->rd_rel->relispartition &&
1104 761098 : (resultRelInfo->ri_RootResultRelInfo == NULL ||
1105 757880 : (resultRelInfo->ri_TrigDesc &&
1106 1586 : resultRelInfo->ri_TrigDesc->trig_insert_before_row)))
1107 3414 : ExecPartitionCheck(resultRelInfo, slot, estate, true);
1108 :
1109 12266612 : if (onconflict != ONCONFLICT_NONE && resultRelInfo->ri_NumIndices > 0)
1110 4142 : {
1111 : /* Perform a speculative insertion. */
1112 : uint32 specToken;
1113 : ItemPointerData conflictTid;
1114 : ItemPointerData invalidItemPtr;
1115 : bool specConflict;
1116 : List *arbiterIndexes;
1117 :
1118 9594 : ItemPointerSetInvalid(&invalidItemPtr);
1119 9594 : arbiterIndexes = resultRelInfo->ri_onConflictArbiterIndexes;
1120 :
1121 : /*
1122 : * Do a non-conclusive check for conflicts first.
1123 : *
1124 : * We're not holding any locks yet, so this doesn't guarantee that
1125 : * the later insert won't conflict. But it avoids leaving behind
1126 : * a lot of canceled speculative insertions, if you run a lot of
1127 : * INSERT ON CONFLICT statements that do conflict.
1128 : *
1129 : * We loop back here if we find a conflict below, either during
1130 : * the pre-check, or when we re-check after inserting the tuple
1131 : * speculatively. Better allow interrupts in case some bug makes
1132 : * this an infinite loop.
1133 : */
1134 10 : vlock:
1135 9604 : CHECK_FOR_INTERRUPTS();
1136 9604 : specConflict = false;
1137 9604 : if (!ExecCheckIndexConstraints(resultRelInfo, slot, estate,
1138 : &conflictTid, &invalidItemPtr,
1139 : arbiterIndexes))
1140 : {
1141 : /* committed conflict tuple found */
1142 5440 : if (onconflict == ONCONFLICT_UPDATE)
1143 : {
1144 : /*
1145 : * In case of ON CONFLICT DO UPDATE, execute the UPDATE
1146 : * part. Be prepared to retry if the UPDATE fails because
1147 : * of another concurrent UPDATE/DELETE to the conflict
1148 : * tuple.
1149 : */
1150 5216 : TupleTableSlot *returning = NULL;
1151 :
1152 5216 : if (ExecOnConflictUpdate(context, resultRelInfo,
1153 : &conflictTid, slot, canSetTag,
1154 : &returning))
1155 : {
1156 5138 : InstrCountTuples2(&mtstate->ps, 1);
1157 5138 : return returning;
1158 : }
1159 : else
1160 0 : goto vlock;
1161 : }
1162 : else
1163 : {
1164 : /*
1165 : * In case of ON CONFLICT DO NOTHING, do nothing. However,
1166 : * verify that the tuple is visible to the executor's MVCC
1167 : * snapshot at higher isolation levels.
1168 : *
1169 : * Using ExecGetReturningSlot() to store the tuple for the
1170 : * recheck isn't that pretty, but we can't trivially use
1171 : * the input slot, because it might not be of a compatible
1172 : * type. As there's no conflicting usage of
1173 : * ExecGetReturningSlot() in the DO NOTHING case...
1174 : */
1175 : Assert(onconflict == ONCONFLICT_NOTHING);
1176 224 : ExecCheckTIDVisible(estate, resultRelInfo, &conflictTid,
1177 : ExecGetReturningSlot(estate, resultRelInfo));
1178 204 : InstrCountTuples2(&mtstate->ps, 1);
1179 204 : return NULL;
1180 : }
1181 : }
1182 :
1183 : /*
1184 : * Before we start insertion proper, acquire our "speculative
1185 : * insertion lock". Others can use that to wait for us to decide
1186 : * if we're going to go ahead with the insertion, instead of
1187 : * waiting for the whole transaction to complete.
1188 : */
1189 4158 : specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
1190 :
1191 : /* insert the tuple, with the speculative token */
1192 4158 : table_tuple_insert_speculative(resultRelationDesc, slot,
1193 : estate->es_output_cid,
1194 : 0,
1195 : NULL,
1196 : specToken);
1197 :
1198 : /* insert index entries for tuple */
1199 4158 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1200 : slot, estate, false, true,
1201 : &specConflict,
1202 : arbiterIndexes,
1203 : false);
1204 :
1205 : /* adjust the tuple's state accordingly */
1206 4152 : table_tuple_complete_speculative(resultRelationDesc, slot,
1207 4152 : specToken, !specConflict);
1208 :
1209 : /*
1210 : * Wake up anyone waiting for our decision. They will re-check
1211 : * the tuple, see that it's no longer speculative, and wait on our
1212 : * XID as if this was a regularly inserted tuple all along. Or if
1213 : * we killed the tuple, they will see it's dead, and proceed as if
1214 : * the tuple never existed.
1215 : */
1216 4152 : SpeculativeInsertionLockRelease(GetCurrentTransactionId());
1217 :
1218 : /*
1219 : * If there was a conflict, start from the beginning. We'll do
1220 : * the pre-check again, which will now find the conflicting tuple
1221 : * (unless it aborts before we get there).
1222 : */
1223 4152 : if (specConflict)
1224 : {
1225 10 : list_free(recheckIndexes);
1226 10 : goto vlock;
1227 : }
1228 :
1229 : /* Since there was no insertion conflict, we're done */
1230 : }
1231 : else
1232 : {
1233 : /* insert the tuple normally */
1234 12257018 : table_tuple_insert(resultRelationDesc, slot,
1235 : estate->es_output_cid,
1236 : 0, NULL);
1237 :
1238 : /* insert index entries for tuple */
1239 12256982 : if (resultRelInfo->ri_NumIndices > 0)
1240 3092242 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
1241 : slot, estate, false,
1242 : false, NULL, NIL,
1243 : false);
1244 : }
1245 : }
1246 :
1247 12262404 : if (canSetTag)
1248 12261226 : (estate->es_processed)++;
1249 :
1250 : /*
1251 : * If this insert is the result of a partition key update that moved the
1252 : * tuple to a new partition, put this row into the transition NEW TABLE,
1253 : * if there is one. We need to do this separately for DELETE and INSERT
1254 : * because they happen on different tables.
1255 : */
1256 12262404 : ar_insert_trig_tcs = mtstate->mt_transition_capture;
1257 12262404 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
1258 54 : && mtstate->mt_transition_capture->tcs_update_new_table)
1259 : {
1260 48 : ExecARUpdateTriggers(estate, resultRelInfo,
1261 : NULL, NULL,
1262 : NULL,
1263 : NULL,
1264 : slot,
1265 : NULL,
1266 48 : mtstate->mt_transition_capture,
1267 : false);
1268 :
1269 : /*
1270 : * We've already captured the NEW TABLE row, so make sure any AR
1271 : * INSERT trigger fired below doesn't capture it again.
1272 : */
1273 48 : ar_insert_trig_tcs = NULL;
1274 : }
1275 :
1276 : /* AFTER ROW INSERT Triggers */
1277 12262404 : ExecARInsertTriggers(estate, resultRelInfo, slot, recheckIndexes,
1278 : ar_insert_trig_tcs);
1279 :
1280 12262402 : list_free(recheckIndexes);
1281 :
1282 : /*
1283 : * Check any WITH CHECK OPTION constraints from parent views. We are
1284 : * required to do this after testing all constraints and uniqueness
1285 : * violations per the SQL spec, so we do it after actually inserting the
1286 : * record into the heap and all indexes.
1287 : *
1288 : * ExecWithCheckOptions will elog(ERROR) if a violation is found, so the
1289 : * tuple will never be seen, if it violates the WITH CHECK OPTION.
1290 : *
1291 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
1292 : * are looking for at this point.
1293 : */
1294 12262402 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1295 436 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1296 :
1297 : /* Process RETURNING if present */
1298 12262256 : if (resultRelInfo->ri_projectReturning)
1299 : {
1300 3630 : TupleTableSlot *oldSlot = NULL;
1301 :
1302 : /*
1303 : * If this is part of a cross-partition UPDATE, and the RETURNING list
1304 : * refers to any OLD columns, ExecDelete() will have saved the tuple
1305 : * deleted from the original partition, which we must use here to
1306 : * compute the OLD column values. Otherwise, all OLD column values
1307 : * will be NULL.
1308 : */
1309 3630 : if (context->cpDeletedSlot)
1310 : {
1311 : TupleConversionMap *tupconv_map;
1312 :
1313 : /*
1314 : * Convert the OLD tuple to the new partition's format/slot, if
1315 : * needed. Note that ExecDelete() already converted it to the
1316 : * root's partition's format/slot.
1317 : */
1318 48 : oldSlot = context->cpDeletedSlot;
1319 48 : tupconv_map = ExecGetRootToChildMap(resultRelInfo, estate);
1320 48 : if (tupconv_map != NULL)
1321 : {
1322 16 : oldSlot = execute_attr_map_slot(tupconv_map->attrMap,
1323 : oldSlot,
1324 : ExecGetReturningSlot(estate,
1325 : resultRelInfo));
1326 :
1327 16 : oldSlot->tts_tableOid = context->cpDeletedSlot->tts_tableOid;
1328 16 : ItemPointerCopy(&context->cpDeletedSlot->tts_tid, &oldSlot->tts_tid);
1329 : }
1330 : }
1331 :
1332 3630 : result = ExecProcessReturning(context, resultRelInfo, CMD_INSERT,
1333 : oldSlot, slot, planSlot);
1334 :
1335 : /*
1336 : * For a cross-partition UPDATE, release the old tuple, first making
1337 : * sure that the result slot has a local copy of any pass-by-reference
1338 : * values.
1339 : */
1340 3618 : if (context->cpDeletedSlot)
1341 : {
1342 48 : ExecMaterializeSlot(result);
1343 48 : ExecClearTuple(oldSlot);
1344 48 : if (context->cpDeletedSlot != oldSlot)
1345 16 : ExecClearTuple(context->cpDeletedSlot);
1346 48 : context->cpDeletedSlot = NULL;
1347 : }
1348 : }
1349 :
1350 12262244 : if (inserted_tuple)
1351 822 : *inserted_tuple = slot;
1352 12262244 : if (insert_destrel)
1353 822 : *insert_destrel = resultRelInfo;
1354 :
1355 12262244 : return result;
1356 : }
1357 :
1358 : /* ----------------------------------------------------------------
1359 : * ExecBatchInsert
1360 : *
1361 : * Insert multiple tuples in an efficient way.
1362 : * Currently, this handles inserting into a foreign table without
1363 : * RETURNING clause.
1364 : * ----------------------------------------------------------------
1365 : */
1366 : static void
1367 58 : ExecBatchInsert(ModifyTableState *mtstate,
1368 : ResultRelInfo *resultRelInfo,
1369 : TupleTableSlot **slots,
1370 : TupleTableSlot **planSlots,
1371 : int numSlots,
1372 : EState *estate,
1373 : bool canSetTag)
1374 : {
1375 : int i;
1376 58 : int numInserted = numSlots;
1377 58 : TupleTableSlot *slot = NULL;
1378 : TupleTableSlot **rslots;
1379 :
1380 : /*
1381 : * insert into foreign table: let the FDW do it
1382 : */
1383 58 : rslots = resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert(estate,
1384 : resultRelInfo,
1385 : slots,
1386 : planSlots,
1387 : &numInserted);
1388 :
1389 346 : for (i = 0; i < numInserted; i++)
1390 : {
1391 290 : slot = rslots[i];
1392 :
1393 : /*
1394 : * AFTER ROW Triggers might reference the tableoid column, so
1395 : * (re-)initialize tts_tableOid before evaluating them.
1396 : */
1397 290 : slot->tts_tableOid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1398 :
1399 : /* AFTER ROW INSERT Triggers */
1400 290 : ExecARInsertTriggers(estate, resultRelInfo, slot, NIL,
1401 290 : mtstate->mt_transition_capture);
1402 :
1403 : /*
1404 : * Check any WITH CHECK OPTION constraints from parent views. See the
1405 : * comment in ExecInsert.
1406 : */
1407 288 : if (resultRelInfo->ri_WithCheckOptions != NIL)
1408 0 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
1409 : }
1410 :
1411 56 : if (canSetTag && numInserted > 0)
1412 56 : estate->es_processed += numInserted;
1413 :
1414 : /* Clean up all the slots, ready for the next batch */
1415 344 : for (i = 0; i < numSlots; i++)
1416 : {
1417 288 : ExecClearTuple(slots[i]);
1418 288 : ExecClearTuple(planSlots[i]);
1419 : }
1420 56 : resultRelInfo->ri_NumSlots = 0;
1421 56 : }
1422 :
1423 : /*
1424 : * ExecPendingInserts -- flushes all pending inserts to the foreign tables
1425 : */
1426 : static void
1427 36 : ExecPendingInserts(EState *estate)
1428 : {
1429 : ListCell *l1,
1430 : *l2;
1431 :
1432 72 : forboth(l1, estate->es_insert_pending_result_relations,
1433 : l2, estate->es_insert_pending_modifytables)
1434 : {
1435 38 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l1);
1436 38 : ModifyTableState *mtstate = (ModifyTableState *) lfirst(l2);
1437 :
1438 : Assert(mtstate);
1439 38 : ExecBatchInsert(mtstate, resultRelInfo,
1440 : resultRelInfo->ri_Slots,
1441 : resultRelInfo->ri_PlanSlots,
1442 : resultRelInfo->ri_NumSlots,
1443 38 : estate, mtstate->canSetTag);
1444 : }
1445 :
1446 34 : list_free(estate->es_insert_pending_result_relations);
1447 34 : list_free(estate->es_insert_pending_modifytables);
1448 34 : estate->es_insert_pending_result_relations = NIL;
1449 34 : estate->es_insert_pending_modifytables = NIL;
1450 34 : }
1451 :
1452 : /*
1453 : * ExecDeletePrologue -- subroutine for ExecDelete
1454 : *
1455 : * Prepare executor state for DELETE. Actually, the only thing we have to do
1456 : * here is execute BEFORE ROW triggers. We return false if one of them makes
1457 : * the delete a no-op; otherwise, return true.
1458 : */
1459 : static bool
1460 1651496 : ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1461 : ItemPointer tupleid, HeapTuple oldtuple,
1462 : TupleTableSlot **epqreturnslot, TM_Result *result)
1463 : {
1464 1651496 : if (result)
1465 1590 : *result = TM_Ok;
1466 :
1467 : /* BEFORE ROW DELETE triggers */
1468 1651496 : if (resultRelInfo->ri_TrigDesc &&
1469 7062 : resultRelInfo->ri_TrigDesc->trig_delete_before_row)
1470 : {
1471 : /* Flush any pending inserts, so rows are visible to the triggers */
1472 346 : if (context->estate->es_insert_pending_result_relations != NIL)
1473 2 : ExecPendingInserts(context->estate);
1474 :
1475 330 : return ExecBRDeleteTriggers(context->estate, context->epqstate,
1476 : resultRelInfo, tupleid, oldtuple,
1477 : epqreturnslot, result, &context->tmfd,
1478 346 : context->mtstate->operation == CMD_MERGE);
1479 : }
1480 :
1481 1651150 : return true;
1482 : }
1483 :
1484 : /*
1485 : * ExecDeleteAct -- subroutine for ExecDelete
1486 : *
1487 : * Actually delete the tuple from a plain table.
1488 : *
1489 : * Caller is in charge of doing EvalPlanQual as necessary
1490 : */
1491 : static TM_Result
1492 1651318 : ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1493 : ItemPointer tupleid, bool changingPart)
1494 : {
1495 1651318 : EState *estate = context->estate;
1496 :
1497 1651318 : return table_tuple_delete(resultRelInfo->ri_RelationDesc, tupleid,
1498 : estate->es_output_cid,
1499 : estate->es_snapshot,
1500 : estate->es_crosscheck_snapshot,
1501 : true /* wait for commit */ ,
1502 : &context->tmfd,
1503 : changingPart);
1504 : }
1505 :
1506 : /*
1507 : * ExecDeleteEpilogue -- subroutine for ExecDelete
1508 : *
1509 : * Closing steps of tuple deletion; this invokes AFTER FOR EACH ROW triggers,
1510 : * including the UPDATE triggers if the deletion is being done as part of a
1511 : * cross-partition tuple move.
1512 : */
1513 : static void
1514 1651258 : ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
1515 : ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
1516 : {
1517 1651258 : ModifyTableState *mtstate = context->mtstate;
1518 1651258 : EState *estate = context->estate;
1519 : TransitionCaptureState *ar_delete_trig_tcs;
1520 :
1521 : /*
1522 : * If this delete is the result of a partition key update that moved the
1523 : * tuple to a new partition, put this row into the transition OLD TABLE,
1524 : * if there is one. We need to do this separately for DELETE and INSERT
1525 : * because they happen on different tables.
1526 : */
1527 1651258 : ar_delete_trig_tcs = mtstate->mt_transition_capture;
1528 1651258 : if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture &&
1529 54 : mtstate->mt_transition_capture->tcs_update_old_table)
1530 : {
1531 48 : ExecARUpdateTriggers(estate, resultRelInfo,
1532 : NULL, NULL,
1533 : tupleid, oldtuple,
1534 48 : NULL, NULL, mtstate->mt_transition_capture,
1535 : false);
1536 :
1537 : /*
1538 : * We've already captured the OLD TABLE row, so make sure any AR
1539 : * DELETE trigger fired below doesn't capture it again.
1540 : */
1541 48 : ar_delete_trig_tcs = NULL;
1542 : }
1543 :
1544 : /* AFTER ROW DELETE Triggers */
1545 1651258 : ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
1546 : ar_delete_trig_tcs, changingPart);
1547 1651254 : }
1548 :
1549 : /* ----------------------------------------------------------------
1550 : * ExecDelete
1551 : *
1552 : * DELETE is like UPDATE, except that we delete the tuple and no
1553 : * index modifications are needed.
1554 : *
1555 : * When deleting from a table, tupleid identifies the tuple to delete and
1556 : * oldtuple is NULL. When deleting through a view INSTEAD OF trigger,
1557 : * oldtuple is passed to the triggers and identifies what to delete, and
1558 : * tupleid is invalid. When deleting from a foreign table, tupleid is
1559 : * invalid; the FDW has to figure out which row to delete using data from
1560 : * the planSlot. oldtuple is passed to foreign table triggers; it is
1561 : * NULL when the foreign table has no relevant triggers. We use
1562 : * tupleDeleted to indicate whether the tuple is actually deleted,
1563 : * callers can use it to decide whether to continue the operation. When
1564 : * this DELETE is a part of an UPDATE of partition-key, then the slot
1565 : * returned by EvalPlanQual() is passed back using output parameter
1566 : * epqreturnslot.
1567 : *
1568 : * Returns RETURNING result if any, otherwise NULL.
1569 : * ----------------------------------------------------------------
1570 : */
1571 : static TupleTableSlot *
1572 1650970 : ExecDelete(ModifyTableContext *context,
1573 : ResultRelInfo *resultRelInfo,
1574 : ItemPointer tupleid,
1575 : HeapTuple oldtuple,
1576 : bool processReturning,
1577 : bool changingPart,
1578 : bool canSetTag,
1579 : TM_Result *tmresult,
1580 : bool *tupleDeleted,
1581 : TupleTableSlot **epqreturnslot)
1582 : {
1583 1650970 : EState *estate = context->estate;
1584 1650970 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
1585 1650970 : TupleTableSlot *slot = NULL;
1586 : TM_Result result;
1587 : bool saveOld;
1588 :
1589 1650970 : if (tupleDeleted)
1590 1064 : *tupleDeleted = false;
1591 :
1592 : /*
1593 : * Prepare for the delete. This includes BEFORE ROW triggers, so we're
1594 : * done if it says we are.
1595 : */
1596 1650970 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
1597 : epqreturnslot, tmresult))
1598 52 : return NULL;
1599 :
1600 : /* INSTEAD OF ROW DELETE Triggers */
1601 1650902 : if (resultRelInfo->ri_TrigDesc &&
1602 6926 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
1603 48 : {
1604 : bool dodelete;
1605 :
1606 : Assert(oldtuple != NULL);
1607 54 : dodelete = ExecIRDeleteTriggers(estate, resultRelInfo, oldtuple);
1608 :
1609 54 : if (!dodelete) /* "do nothing" */
1610 6 : return NULL;
1611 : }
1612 1650848 : else if (resultRelInfo->ri_FdwRoutine)
1613 : {
1614 : /*
1615 : * delete from foreign table: let the FDW do it
1616 : *
1617 : * We offer the returning slot as a place to store RETURNING data,
1618 : * although the FDW can return some other slot if it wants.
1619 : */
1620 46 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1621 46 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignDelete(estate,
1622 : resultRelInfo,
1623 : slot,
1624 : context->planSlot);
1625 :
1626 46 : if (slot == NULL) /* "do nothing" */
1627 0 : return NULL;
1628 :
1629 : /*
1630 : * RETURNING expressions might reference the tableoid column, so
1631 : * (re)initialize tts_tableOid before evaluating them.
1632 : */
1633 46 : if (TTS_EMPTY(slot))
1634 10 : ExecStoreAllNullTuple(slot);
1635 :
1636 46 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
1637 : }
1638 : else
1639 : {
1640 : /*
1641 : * delete the tuple
1642 : *
1643 : * Note: if context->estate->es_crosscheck_snapshot isn't
1644 : * InvalidSnapshot, we check that the row to be deleted is visible to
1645 : * that snapshot, and throw a can't-serialize error if not. This is a
1646 : * special-case behavior needed for referential integrity updates in
1647 : * transaction-snapshot mode transactions.
1648 : */
1649 1650802 : ldelete:
1650 1650810 : result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
1651 :
1652 1650774 : if (tmresult)
1653 1030 : *tmresult = result;
1654 :
1655 1650774 : switch (result)
1656 : {
1657 30 : case TM_SelfModified:
1658 :
1659 : /*
1660 : * The target tuple was already updated or deleted by the
1661 : * current command, or by a later command in the current
1662 : * transaction. The former case is possible in a join DELETE
1663 : * where multiple tuples join to the same target tuple. This
1664 : * is somewhat questionable, but Postgres has always allowed
1665 : * it: we just ignore additional deletion attempts.
1666 : *
1667 : * The latter case arises if the tuple is modified by a
1668 : * command in a BEFORE trigger, or perhaps by a command in a
1669 : * volatile function used in the query. In such situations we
1670 : * should not ignore the deletion, but it is equally unsafe to
1671 : * proceed. We don't want to discard the original DELETE
1672 : * while keeping the triggered actions based on its deletion;
1673 : * and it would be no better to allow the original DELETE
1674 : * while discarding updates that it triggered. The row update
1675 : * carries some information that might be important according
1676 : * to business rules; so throwing an error is the only safe
1677 : * course.
1678 : *
1679 : * If a trigger actually intends this type of interaction, it
1680 : * can re-execute the DELETE and then return NULL to cancel
1681 : * the outer delete.
1682 : */
1683 30 : if (context->tmfd.cmax != estate->es_output_cid)
1684 6 : ereport(ERROR,
1685 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1686 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1687 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1688 :
1689 : /* Else, already deleted by self; nothing to do */
1690 24 : return NULL;
1691 :
1692 1650668 : case TM_Ok:
1693 1650668 : break;
1694 :
1695 70 : case TM_Updated:
1696 : {
1697 : TupleTableSlot *inputslot;
1698 : TupleTableSlot *epqslot;
1699 :
1700 70 : if (IsolationUsesXactSnapshot())
1701 2 : ereport(ERROR,
1702 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1703 : errmsg("could not serialize access due to concurrent update")));
1704 :
1705 : /*
1706 : * Already know that we're going to need to do EPQ, so
1707 : * fetch tuple directly into the right slot.
1708 : */
1709 68 : EvalPlanQualBegin(context->epqstate);
1710 68 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
1711 : resultRelInfo->ri_RangeTableIndex);
1712 :
1713 68 : result = table_tuple_lock(resultRelationDesc, tupleid,
1714 : estate->es_snapshot,
1715 : inputslot, estate->es_output_cid,
1716 : LockTupleExclusive, LockWaitBlock,
1717 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
1718 : &context->tmfd);
1719 :
1720 60 : switch (result)
1721 : {
1722 54 : case TM_Ok:
1723 : Assert(context->tmfd.traversed);
1724 54 : epqslot = EvalPlanQual(context->epqstate,
1725 : resultRelationDesc,
1726 : resultRelInfo->ri_RangeTableIndex,
1727 : inputslot);
1728 54 : if (TupIsNull(epqslot))
1729 : /* Tuple not passing quals anymore, exiting... */
1730 30 : return NULL;
1731 :
1732 : /*
1733 : * If requested, skip delete and pass back the
1734 : * updated row.
1735 : */
1736 24 : if (epqreturnslot)
1737 : {
1738 16 : *epqreturnslot = epqslot;
1739 16 : return NULL;
1740 : }
1741 : else
1742 8 : goto ldelete;
1743 :
1744 4 : case TM_SelfModified:
1745 :
1746 : /*
1747 : * This can be reached when following an update
1748 : * chain from a tuple updated by another session,
1749 : * reaching a tuple that was already updated in
1750 : * this transaction. If previously updated by this
1751 : * command, ignore the delete, otherwise error
1752 : * out.
1753 : *
1754 : * See also TM_SelfModified response to
1755 : * table_tuple_delete() above.
1756 : */
1757 4 : if (context->tmfd.cmax != estate->es_output_cid)
1758 2 : ereport(ERROR,
1759 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
1760 : errmsg("tuple to be deleted was already modified by an operation triggered by the current command"),
1761 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
1762 2 : return NULL;
1763 :
1764 2 : case TM_Deleted:
1765 : /* tuple already deleted; nothing to do */
1766 2 : return NULL;
1767 :
1768 0 : default:
1769 :
1770 : /*
1771 : * TM_Invisible should be impossible because we're
1772 : * waiting for updated row versions, and would
1773 : * already have errored out if the first version
1774 : * is invisible.
1775 : *
1776 : * TM_Updated should be impossible, because we're
1777 : * locking the latest version via
1778 : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION.
1779 : */
1780 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
1781 : result);
1782 : return NULL;
1783 : }
1784 :
1785 : Assert(false);
1786 : break;
1787 : }
1788 :
1789 6 : case TM_Deleted:
1790 6 : if (IsolationUsesXactSnapshot())
1791 0 : ereport(ERROR,
1792 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
1793 : errmsg("could not serialize access due to concurrent delete")));
1794 : /* tuple already deleted; nothing to do */
1795 6 : return NULL;
1796 :
1797 0 : default:
1798 0 : elog(ERROR, "unrecognized table_tuple_delete status: %u",
1799 : result);
1800 : return NULL;
1801 : }
1802 :
1803 : /*
1804 : * Note: Normally one would think that we have to delete index tuples
1805 : * associated with the heap tuple now...
1806 : *
1807 : * ... but in POSTGRES, we have no need to do this because VACUUM will
1808 : * take care of it later. We can't delete index tuples immediately
1809 : * anyway, since the tuple is still visible to other transactions.
1810 : */
1811 : }
1812 :
1813 1650762 : if (canSetTag)
1814 1649556 : (estate->es_processed)++;
1815 :
1816 : /* Tell caller that the delete actually happened. */
1817 1650762 : if (tupleDeleted)
1818 978 : *tupleDeleted = true;
1819 :
1820 1650762 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
1821 :
1822 : /*
1823 : * Process RETURNING if present and if requested.
1824 : *
1825 : * If this is part of a cross-partition UPDATE, and the RETURNING list
1826 : * refers to any OLD column values, save the old tuple here for later
1827 : * processing of the RETURNING list by ExecInsert().
1828 : */
1829 1650908 : saveOld = changingPart && resultRelInfo->ri_projectReturning &&
1830 150 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD;
1831 :
1832 1650758 : if (resultRelInfo->ri_projectReturning && (processReturning || saveOld))
1833 : {
1834 : /*
1835 : * We have to put the target tuple into a slot, which means first we
1836 : * gotta fetch it. We can use the trigger tuple slot.
1837 : */
1838 : TupleTableSlot *rslot;
1839 :
1840 1012 : if (resultRelInfo->ri_FdwRoutine)
1841 : {
1842 : /* FDW must have provided a slot containing the deleted row */
1843 : Assert(!TupIsNull(slot));
1844 : }
1845 : else
1846 : {
1847 998 : slot = ExecGetReturningSlot(estate, resultRelInfo);
1848 998 : if (oldtuple != NULL)
1849 : {
1850 24 : ExecForceStoreHeapTuple(oldtuple, slot, false);
1851 : }
1852 : else
1853 : {
1854 974 : if (!table_tuple_fetch_row_version(resultRelationDesc, tupleid,
1855 : SnapshotAny, slot))
1856 0 : elog(ERROR, "failed to fetch deleted tuple for DELETE RETURNING");
1857 : }
1858 : }
1859 :
1860 : /*
1861 : * If required, save the old tuple for later processing of the
1862 : * RETURNING list by ExecInsert().
1863 : */
1864 1012 : if (saveOld)
1865 : {
1866 : TupleConversionMap *tupconv_map;
1867 :
1868 : /*
1869 : * Convert the tuple into the root partition's format/slot, if
1870 : * needed. ExecInsert() will then convert it to the new
1871 : * partition's format/slot, if necessary.
1872 : */
1873 48 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
1874 48 : if (tupconv_map != NULL)
1875 : {
1876 20 : ResultRelInfo *rootRelInfo = context->mtstate->rootResultRelInfo;
1877 20 : TupleTableSlot *oldSlot = slot;
1878 :
1879 20 : slot = execute_attr_map_slot(tupconv_map->attrMap,
1880 : slot,
1881 : ExecGetReturningSlot(estate,
1882 : rootRelInfo));
1883 :
1884 20 : slot->tts_tableOid = oldSlot->tts_tableOid;
1885 20 : ItemPointerCopy(&oldSlot->tts_tid, &slot->tts_tid);
1886 : }
1887 :
1888 48 : context->cpDeletedSlot = slot;
1889 :
1890 48 : return NULL;
1891 : }
1892 :
1893 964 : rslot = ExecProcessReturning(context, resultRelInfo, CMD_DELETE,
1894 : slot, NULL, context->planSlot);
1895 :
1896 : /*
1897 : * Before releasing the target tuple again, make sure rslot has a
1898 : * local copy of any pass-by-reference values.
1899 : */
1900 964 : ExecMaterializeSlot(rslot);
1901 :
1902 964 : ExecClearTuple(slot);
1903 :
1904 964 : return rslot;
1905 : }
1906 :
1907 1649746 : return NULL;
1908 : }
1909 :
1910 : /*
1911 : * ExecCrossPartitionUpdate --- Move an updated tuple to another partition.
1912 : *
1913 : * This works by first deleting the old tuple from the current partition,
1914 : * followed by inserting the new tuple into the root parent table, that is,
1915 : * mtstate->rootResultRelInfo. It will be re-routed from there to the
1916 : * correct partition.
1917 : *
1918 : * Returns true if the tuple has been successfully moved, or if it's found
1919 : * that the tuple was concurrently deleted so there's nothing more to do
1920 : * for the caller.
1921 : *
1922 : * False is returned if the tuple we're trying to move is found to have been
1923 : * concurrently updated. In that case, the caller must check if the updated
1924 : * tuple that's returned in *retry_slot still needs to be re-routed, and call
1925 : * this function again or perform a regular update accordingly. For MERGE,
1926 : * the updated tuple is not returned in *retry_slot; it has its own retry
1927 : * logic.
1928 : */
1929 : static bool
1930 1112 : ExecCrossPartitionUpdate(ModifyTableContext *context,
1931 : ResultRelInfo *resultRelInfo,
1932 : ItemPointer tupleid, HeapTuple oldtuple,
1933 : TupleTableSlot *slot,
1934 : bool canSetTag,
1935 : UpdateContext *updateCxt,
1936 : TM_Result *tmresult,
1937 : TupleTableSlot **retry_slot,
1938 : TupleTableSlot **inserted_tuple,
1939 : ResultRelInfo **insert_destrel)
1940 : {
1941 1112 : ModifyTableState *mtstate = context->mtstate;
1942 1112 : EState *estate = mtstate->ps.state;
1943 : TupleConversionMap *tupconv_map;
1944 : bool tuple_deleted;
1945 1112 : TupleTableSlot *epqslot = NULL;
1946 :
1947 1112 : context->cpDeletedSlot = NULL;
1948 1112 : context->cpUpdateReturningSlot = NULL;
1949 1112 : *retry_slot = NULL;
1950 :
1951 : /*
1952 : * Disallow an INSERT ON CONFLICT DO UPDATE that causes the original row
1953 : * to migrate to a different partition. Maybe this can be implemented
1954 : * some day, but it seems a fringe feature with little redeeming value.
1955 : */
1956 1112 : if (((ModifyTable *) mtstate->ps.plan)->onConflictAction == ONCONFLICT_UPDATE)
1957 0 : ereport(ERROR,
1958 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1959 : errmsg("invalid ON UPDATE specification"),
1960 : errdetail("The result tuple would appear in a different partition than the original tuple.")));
1961 :
1962 : /*
1963 : * When an UPDATE is run directly on a leaf partition, simply fail with a
1964 : * partition constraint violation error.
1965 : */
1966 1112 : if (resultRelInfo == mtstate->rootResultRelInfo)
1967 48 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1968 :
1969 : /* Initialize tuple routing info if not already done. */
1970 1064 : if (mtstate->mt_partition_tuple_routing == NULL)
1971 : {
1972 678 : Relation rootRel = mtstate->rootResultRelInfo->ri_RelationDesc;
1973 : MemoryContext oldcxt;
1974 :
1975 : /* Things built here have to last for the query duration. */
1976 678 : oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1977 :
1978 678 : mtstate->mt_partition_tuple_routing =
1979 678 : ExecSetupPartitionTupleRouting(estate, rootRel);
1980 :
1981 : /*
1982 : * Before a partition's tuple can be re-routed, it must first be
1983 : * converted to the root's format, so we'll need a slot for storing
1984 : * such tuples.
1985 : */
1986 : Assert(mtstate->mt_root_tuple_slot == NULL);
1987 678 : mtstate->mt_root_tuple_slot = table_slot_create(rootRel, NULL);
1988 :
1989 678 : MemoryContextSwitchTo(oldcxt);
1990 : }
1991 :
1992 : /*
1993 : * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
1994 : * We want to return rows from INSERT.
1995 : */
1996 1064 : ExecDelete(context, resultRelInfo,
1997 : tupleid, oldtuple,
1998 : false, /* processReturning */
1999 : true, /* changingPart */
2000 : false, /* canSetTag */
2001 : tmresult, &tuple_deleted, &epqslot);
2002 :
2003 : /*
2004 : * For some reason if DELETE didn't happen (e.g. trigger prevented it, or
2005 : * it was already deleted by self, or it was concurrently deleted by
2006 : * another transaction), then we should skip the insert as well;
2007 : * otherwise, an UPDATE could cause an increase in the total number of
2008 : * rows across all partitions, which is clearly wrong.
2009 : *
2010 : * For a normal UPDATE, the case where the tuple has been the subject of a
2011 : * concurrent UPDATE or DELETE would be handled by the EvalPlanQual
2012 : * machinery, but for an UPDATE that we've translated into a DELETE from
2013 : * this partition and an INSERT into some other partition, that's not
2014 : * available, because CTID chains can't span relation boundaries. We
2015 : * mimic the semantics to a limited extent by skipping the INSERT if the
2016 : * DELETE fails to find a tuple. This ensures that two concurrent
2017 : * attempts to UPDATE the same tuple at the same time can't turn one tuple
2018 : * into two, and that an UPDATE of a just-deleted tuple can't resurrect
2019 : * it.
2020 : */
2021 1058 : if (!tuple_deleted)
2022 : {
2023 : /*
2024 : * epqslot will be typically NULL. But when ExecDelete() finds that
2025 : * another transaction has concurrently updated the same row, it
2026 : * re-fetches the row, skips the delete, and epqslot is set to the
2027 : * re-fetched tuple slot. In that case, we need to do all the checks
2028 : * again. For MERGE, we leave everything to the caller (it must do
2029 : * additional rechecking, and might end up executing a different
2030 : * action entirely).
2031 : */
2032 80 : if (mtstate->operation == CMD_MERGE)
2033 38 : return *tmresult == TM_Ok;
2034 42 : else if (TupIsNull(epqslot))
2035 36 : return true;
2036 : else
2037 : {
2038 : /* Fetch the most recent version of old tuple. */
2039 : TupleTableSlot *oldSlot;
2040 :
2041 : /* ... but first, make sure ri_oldTupleSlot is initialized. */
2042 6 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2043 0 : ExecInitUpdateProjection(mtstate, resultRelInfo);
2044 6 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2045 6 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
2046 : tupleid,
2047 : SnapshotAny,
2048 : oldSlot))
2049 0 : elog(ERROR, "failed to fetch tuple being updated");
2050 : /* and project the new tuple to retry the UPDATE with */
2051 6 : *retry_slot = ExecGetUpdateNewTuple(resultRelInfo, epqslot,
2052 : oldSlot);
2053 6 : return false;
2054 : }
2055 : }
2056 :
2057 : /*
2058 : * resultRelInfo is one of the per-relation resultRelInfos. So we should
2059 : * convert the tuple into root's tuple descriptor if needed, since
2060 : * ExecInsert() starts the search from root.
2061 : */
2062 978 : tupconv_map = ExecGetChildToRootMap(resultRelInfo);
2063 978 : if (tupconv_map != NULL)
2064 316 : slot = execute_attr_map_slot(tupconv_map->attrMap,
2065 : slot,
2066 : mtstate->mt_root_tuple_slot);
2067 :
2068 : /* Tuple routing starts from the root table. */
2069 850 : context->cpUpdateReturningSlot =
2070 978 : ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
2071 : inserted_tuple, insert_destrel);
2072 :
2073 : /*
2074 : * Reset the transition state that may possibly have been written by
2075 : * INSERT.
2076 : */
2077 850 : if (mtstate->mt_transition_capture)
2078 54 : mtstate->mt_transition_capture->tcs_original_insert_tuple = NULL;
2079 :
2080 : /* We're done moving. */
2081 850 : return true;
2082 : }
2083 :
2084 : /*
2085 : * ExecUpdatePrologue -- subroutine for ExecUpdate
2086 : *
2087 : * Prepare executor state for UPDATE. This includes running BEFORE ROW
2088 : * triggers. We return false if one of them makes the update a no-op;
2089 : * otherwise, return true.
2090 : */
2091 : static bool
2092 325208 : ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2093 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2094 : TM_Result *result)
2095 : {
2096 325208 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2097 :
2098 325208 : if (result)
2099 2184 : *result = TM_Ok;
2100 :
2101 325208 : ExecMaterializeSlot(slot);
2102 :
2103 : /*
2104 : * Open the table's indexes, if we have not done so already, so that we
2105 : * can add new index entries for the updated tuple.
2106 : */
2107 325208 : if (resultRelationDesc->rd_rel->relhasindex &&
2108 234178 : resultRelInfo->ri_IndexRelationDescs == NULL)
2109 9046 : ExecOpenIndices(resultRelInfo, false);
2110 :
2111 : /* BEFORE ROW UPDATE triggers */
2112 325208 : if (resultRelInfo->ri_TrigDesc &&
2113 6346 : resultRelInfo->ri_TrigDesc->trig_update_before_row)
2114 : {
2115 : /* Flush any pending inserts, so rows are visible to the triggers */
2116 2610 : if (context->estate->es_insert_pending_result_relations != NIL)
2117 2 : ExecPendingInserts(context->estate);
2118 :
2119 2586 : return ExecBRUpdateTriggers(context->estate, context->epqstate,
2120 : resultRelInfo, tupleid, oldtuple, slot,
2121 : result, &context->tmfd,
2122 2610 : context->mtstate->operation == CMD_MERGE);
2123 : }
2124 :
2125 322598 : return true;
2126 : }
2127 :
2128 : /*
2129 : * ExecUpdatePrepareSlot -- subroutine for ExecUpdateAct
2130 : *
2131 : * Apply the final modifications to the tuple slot before the update.
2132 : * (This is split out because we also need it in the foreign-table code path.)
2133 : */
2134 : static void
2135 324934 : ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
2136 : TupleTableSlot *slot,
2137 : EState *estate)
2138 : {
2139 324934 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2140 :
2141 : /*
2142 : * Constraints and GENERATED expressions might reference the tableoid
2143 : * column, so (re-)initialize tts_tableOid before evaluating them.
2144 : */
2145 324934 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2146 :
2147 : /*
2148 : * Compute stored generated columns
2149 : */
2150 324934 : if (resultRelationDesc->rd_att->constr &&
2151 197304 : resultRelationDesc->rd_att->constr->has_generated_stored)
2152 276 : ExecComputeStoredGenerated(resultRelInfo, estate, slot,
2153 : CMD_UPDATE);
2154 324934 : }
2155 :
2156 : /*
2157 : * ExecUpdateAct -- subroutine for ExecUpdate
2158 : *
2159 : * Actually update the tuple, when operating on a plain table. If the
2160 : * table is a partition, and the command was called referencing an ancestor
2161 : * partitioned table, this routine migrates the resulting tuple to another
2162 : * partition.
2163 : *
2164 : * The caller is in charge of keeping indexes current as necessary. The
2165 : * caller is also in charge of doing EvalPlanQual if the tuple is found to
2166 : * be concurrently updated. However, in case of a cross-partition update,
2167 : * this routine does it.
2168 : */
2169 : static TM_Result
2170 324738 : ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2171 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
2172 : bool canSetTag, UpdateContext *updateCxt)
2173 : {
2174 324738 : EState *estate = context->estate;
2175 324738 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2176 : bool partition_constraint_failed;
2177 : TM_Result result;
2178 :
2179 324738 : updateCxt->crossPartUpdate = false;
2180 :
2181 : /*
2182 : * If we move the tuple to a new partition, we loop back here to recompute
2183 : * GENERATED values (which are allowed to be different across partitions)
2184 : * and recheck any RLS policies and constraints. We do not fire any
2185 : * BEFORE triggers of the new partition, however.
2186 : */
2187 324744 : lreplace:
2188 : /* Fill in GENERATEd columns */
2189 324744 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2190 :
2191 : /* ensure slot is independent, consider e.g. EPQ */
2192 324744 : ExecMaterializeSlot(slot);
2193 :
2194 : /*
2195 : * If partition constraint fails, this row might get moved to another
2196 : * partition, in which case we should check the RLS CHECK policy just
2197 : * before inserting into the new partition, rather than doing it here.
2198 : * This is because a trigger on that partition might again change the row.
2199 : * So skip the WCO checks if the partition constraint fails.
2200 : */
2201 324744 : partition_constraint_failed =
2202 327494 : resultRelationDesc->rd_rel->relispartition &&
2203 2750 : !ExecPartitionCheck(resultRelInfo, slot, estate, false);
2204 :
2205 : /* Check any RLS UPDATE WITH CHECK policies */
2206 324744 : if (!partition_constraint_failed &&
2207 323632 : resultRelInfo->ri_WithCheckOptions != NIL)
2208 : {
2209 : /*
2210 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind
2211 : * we are looking for at this point.
2212 : */
2213 534 : ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
2214 : resultRelInfo, slot, estate);
2215 : }
2216 :
2217 : /*
2218 : * If a partition check failed, try to move the row into the right
2219 : * partition.
2220 : */
2221 324690 : if (partition_constraint_failed)
2222 : {
2223 : TupleTableSlot *inserted_tuple,
2224 : *retry_slot;
2225 1112 : ResultRelInfo *insert_destrel = NULL;
2226 :
2227 : /*
2228 : * ExecCrossPartitionUpdate will first DELETE the row from the
2229 : * partition it's currently in and then insert it back into the root
2230 : * table, which will re-route it to the correct partition. However,
2231 : * if the tuple has been concurrently updated, a retry is needed.
2232 : */
2233 1112 : if (ExecCrossPartitionUpdate(context, resultRelInfo,
2234 : tupleid, oldtuple, slot,
2235 : canSetTag, updateCxt,
2236 : &result,
2237 : &retry_slot,
2238 : &inserted_tuple,
2239 : &insert_destrel))
2240 : {
2241 : /* success! */
2242 910 : updateCxt->crossPartUpdate = true;
2243 :
2244 : /*
2245 : * If the partitioned table being updated is referenced in foreign
2246 : * keys, queue up trigger events to check that none of them were
2247 : * violated. No special treatment is needed in
2248 : * non-cross-partition update situations, because the leaf
2249 : * partition's AR update triggers will take care of that. During
2250 : * cross-partition updates implemented as delete on the source
2251 : * partition followed by insert on the destination partition,
2252 : * AR-UPDATE triggers of the root table (that is, the table
2253 : * mentioned in the query) must be fired.
2254 : *
2255 : * NULL insert_destrel means that the move failed to occur, that
2256 : * is, the update failed, so no need to anything in that case.
2257 : */
2258 910 : if (insert_destrel &&
2259 822 : resultRelInfo->ri_TrigDesc &&
2260 368 : resultRelInfo->ri_TrigDesc->trig_update_after_row)
2261 306 : ExecCrossPartitionUpdateForeignKey(context,
2262 : resultRelInfo,
2263 : insert_destrel,
2264 : tupleid, slot,
2265 : inserted_tuple);
2266 :
2267 918 : return TM_Ok;
2268 : }
2269 :
2270 : /*
2271 : * No luck, a retry is needed. If running MERGE, we do not do so
2272 : * here; instead let it handle that on its own rules.
2273 : */
2274 20 : if (context->mtstate->operation == CMD_MERGE)
2275 14 : return result;
2276 :
2277 : /*
2278 : * ExecCrossPartitionUpdate installed an updated version of the new
2279 : * tuple in the retry slot; start over.
2280 : */
2281 6 : slot = retry_slot;
2282 6 : goto lreplace;
2283 : }
2284 :
2285 : /*
2286 : * Check the constraints of the tuple. We've already checked the
2287 : * partition constraint above; however, we must still ensure the tuple
2288 : * passes all other constraints, so we will call ExecConstraints() and
2289 : * have it validate all remaining checks.
2290 : */
2291 323578 : if (resultRelationDesc->rd_att->constr)
2292 196678 : ExecConstraints(resultRelInfo, slot, estate);
2293 :
2294 : /*
2295 : * replace the heap tuple
2296 : *
2297 : * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
2298 : * the row to be updated is visible to that snapshot, and throw a
2299 : * can't-serialize error if not. This is a special-case behavior needed
2300 : * for referential integrity updates in transaction-snapshot mode
2301 : * transactions.
2302 : */
2303 323504 : result = table_tuple_update(resultRelationDesc, tupleid, slot,
2304 : estate->es_output_cid,
2305 : estate->es_snapshot,
2306 : estate->es_crosscheck_snapshot,
2307 : true /* wait for commit */ ,
2308 : &context->tmfd, &updateCxt->lockmode,
2309 : &updateCxt->updateIndexes);
2310 :
2311 323480 : return result;
2312 : }
2313 :
2314 : /*
2315 : * ExecUpdateEpilogue -- subroutine for ExecUpdate
2316 : *
2317 : * Closing steps of updating a tuple. Must be called if ExecUpdateAct
2318 : * returns indicating that the tuple was updated.
2319 : */
2320 : static void
2321 323526 : ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
2322 : ResultRelInfo *resultRelInfo, ItemPointer tupleid,
2323 : HeapTuple oldtuple, TupleTableSlot *slot)
2324 : {
2325 323526 : ModifyTableState *mtstate = context->mtstate;
2326 323526 : List *recheckIndexes = NIL;
2327 :
2328 : /* insert index entries for tuple if necessary */
2329 323526 : if (resultRelInfo->ri_NumIndices > 0 && (updateCxt->updateIndexes != TU_None))
2330 177332 : recheckIndexes = ExecInsertIndexTuples(resultRelInfo,
2331 : slot, context->estate,
2332 : true, false,
2333 : NULL, NIL,
2334 177332 : (updateCxt->updateIndexes == TU_Summarizing));
2335 :
2336 : /* AFTER ROW UPDATE Triggers */
2337 323434 : ExecARUpdateTriggers(context->estate, resultRelInfo,
2338 : NULL, NULL,
2339 : tupleid, oldtuple, slot,
2340 : recheckIndexes,
2341 323434 : mtstate->operation == CMD_INSERT ?
2342 : mtstate->mt_oc_transition_capture :
2343 : mtstate->mt_transition_capture,
2344 : false);
2345 :
2346 323430 : list_free(recheckIndexes);
2347 :
2348 : /*
2349 : * Check any WITH CHECK OPTION constraints from parent views. We are
2350 : * required to do this after testing all constraints and uniqueness
2351 : * violations per the SQL spec, so we do it after actually updating the
2352 : * record in the heap and all indexes.
2353 : *
2354 : * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
2355 : * are looking for at this point.
2356 : */
2357 323430 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2358 508 : ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo,
2359 : slot, context->estate);
2360 323348 : }
2361 :
2362 : /*
2363 : * Queues up an update event using the target root partitioned table's
2364 : * trigger to check that a cross-partition update hasn't broken any foreign
2365 : * keys pointing into it.
2366 : */
2367 : static void
2368 306 : ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
2369 : ResultRelInfo *sourcePartInfo,
2370 : ResultRelInfo *destPartInfo,
2371 : ItemPointer tupleid,
2372 : TupleTableSlot *oldslot,
2373 : TupleTableSlot *newslot)
2374 : {
2375 : ListCell *lc;
2376 : ResultRelInfo *rootRelInfo;
2377 : List *ancestorRels;
2378 :
2379 306 : rootRelInfo = sourcePartInfo->ri_RootResultRelInfo;
2380 306 : ancestorRels = ExecGetAncestorResultRels(context->estate, sourcePartInfo);
2381 :
2382 : /*
2383 : * For any foreign keys that point directly into a non-root ancestors of
2384 : * the source partition, we can in theory fire an update event to enforce
2385 : * those constraints using their triggers, if we could tell that both the
2386 : * source and the destination partitions are under the same ancestor. But
2387 : * for now, we simply report an error that those cannot be enforced.
2388 : */
2389 666 : foreach(lc, ancestorRels)
2390 : {
2391 366 : ResultRelInfo *rInfo = lfirst(lc);
2392 366 : TriggerDesc *trigdesc = rInfo->ri_TrigDesc;
2393 366 : bool has_noncloned_fkey = false;
2394 :
2395 : /* Root ancestor's triggers will be processed. */
2396 366 : if (rInfo == rootRelInfo)
2397 300 : continue;
2398 :
2399 66 : if (trigdesc && trigdesc->trig_update_after_row)
2400 : {
2401 228 : for (int i = 0; i < trigdesc->numtriggers; i++)
2402 : {
2403 168 : Trigger *trig = &trigdesc->triggers[i];
2404 :
2405 174 : if (!trig->tgisclone &&
2406 6 : RI_FKey_trigger_type(trig->tgfoid) == RI_TRIGGER_PK)
2407 : {
2408 6 : has_noncloned_fkey = true;
2409 6 : break;
2410 : }
2411 : }
2412 : }
2413 :
2414 66 : if (has_noncloned_fkey)
2415 6 : ereport(ERROR,
2416 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2417 : errmsg("cannot move tuple across partitions when a non-root ancestor of the source partition is directly referenced in a foreign key"),
2418 : errdetail("A foreign key points to ancestor \"%s\" but not the root ancestor \"%s\".",
2419 : RelationGetRelationName(rInfo->ri_RelationDesc),
2420 : RelationGetRelationName(rootRelInfo->ri_RelationDesc)),
2421 : errhint("Consider defining the foreign key on table \"%s\".",
2422 : RelationGetRelationName(rootRelInfo->ri_RelationDesc))));
2423 : }
2424 :
2425 : /* Perform the root table's triggers. */
2426 300 : ExecARUpdateTriggers(context->estate,
2427 : rootRelInfo, sourcePartInfo, destPartInfo,
2428 : tupleid, NULL, newslot, NIL, NULL, true);
2429 300 : }
2430 :
2431 : /* ----------------------------------------------------------------
2432 : * ExecUpdate
2433 : *
2434 : * note: we can't run UPDATE queries with transactions
2435 : * off because UPDATEs are actually INSERTs and our
2436 : * scan will mistakenly loop forever, updating the tuple
2437 : * it just inserted.. This should be fixed but until it
2438 : * is, we don't want to get stuck in an infinite loop
2439 : * which corrupts your database..
2440 : *
2441 : * When updating a table, tupleid identifies the tuple to update and
2442 : * oldtuple is NULL. When updating through a view INSTEAD OF trigger,
2443 : * oldtuple is passed to the triggers and identifies what to update, and
2444 : * tupleid is invalid. When updating a foreign table, tupleid is
2445 : * invalid; the FDW has to figure out which row to update using data from
2446 : * the planSlot. oldtuple is passed to foreign table triggers; it is
2447 : * NULL when the foreign table has no relevant triggers.
2448 : *
2449 : * oldSlot contains the old tuple value.
2450 : * slot contains the new tuple value to be stored.
2451 : * planSlot is the output of the ModifyTable's subplan; we use it
2452 : * to access values from other input tables (for RETURNING),
2453 : * row-ID junk columns, etc.
2454 : *
2455 : * Returns RETURNING result if any, otherwise NULL. On exit, if tupleid
2456 : * had identified the tuple to update, it will identify the tuple
2457 : * actually updated after EvalPlanQual.
2458 : * ----------------------------------------------------------------
2459 : */
2460 : static TupleTableSlot *
2461 323024 : ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2462 : ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *oldSlot,
2463 : TupleTableSlot *slot, bool canSetTag)
2464 : {
2465 323024 : EState *estate = context->estate;
2466 323024 : Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
2467 323024 : UpdateContext updateCxt = {0};
2468 : TM_Result result;
2469 :
2470 : /*
2471 : * abort the operation if not running transactions
2472 : */
2473 323024 : if (IsBootstrapProcessingMode())
2474 0 : elog(ERROR, "cannot UPDATE during bootstrap");
2475 :
2476 : /*
2477 : * Prepare for the update. This includes BEFORE ROW triggers, so we're
2478 : * done if it says we are.
2479 : */
2480 323024 : if (!ExecUpdatePrologue(context, resultRelInfo, tupleid, oldtuple, slot, NULL))
2481 132 : return NULL;
2482 :
2483 : /* INSTEAD OF ROW UPDATE Triggers */
2484 322868 : if (resultRelInfo->ri_TrigDesc &&
2485 5822 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
2486 : {
2487 126 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
2488 : oldtuple, slot))
2489 18 : return NULL; /* "do nothing" */
2490 : }
2491 322742 : else if (resultRelInfo->ri_FdwRoutine)
2492 : {
2493 : /* Fill in GENERATEd columns */
2494 190 : ExecUpdatePrepareSlot(resultRelInfo, slot, estate);
2495 :
2496 : /*
2497 : * update in foreign table: let the FDW do it
2498 : */
2499 190 : slot = resultRelInfo->ri_FdwRoutine->ExecForeignUpdate(estate,
2500 : resultRelInfo,
2501 : slot,
2502 : context->planSlot);
2503 :
2504 190 : if (slot == NULL) /* "do nothing" */
2505 2 : return NULL;
2506 :
2507 : /*
2508 : * AFTER ROW Triggers or RETURNING expressions might reference the
2509 : * tableoid column, so (re-)initialize tts_tableOid before evaluating
2510 : * them. (This covers the case where the FDW replaced the slot.)
2511 : */
2512 188 : slot->tts_tableOid = RelationGetRelid(resultRelationDesc);
2513 : }
2514 : else
2515 : {
2516 : ItemPointerData lockedtid;
2517 :
2518 : /*
2519 : * If we generate a new candidate tuple after EvalPlanQual testing, we
2520 : * must loop back here to try again. (We don't need to redo triggers,
2521 : * however. If there are any BEFORE triggers then trigger.c will have
2522 : * done table_tuple_lock to lock the correct tuple, so there's no need
2523 : * to do them again.)
2524 : */
2525 322552 : redo_act:
2526 322652 : lockedtid = *tupleid;
2527 322652 : result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
2528 : canSetTag, &updateCxt);
2529 :
2530 : /*
2531 : * If ExecUpdateAct reports that a cross-partition update was done,
2532 : * then the RETURNING tuple (if any) has been projected and there's
2533 : * nothing else for us to do.
2534 : */
2535 322336 : if (updateCxt.crossPartUpdate)
2536 898 : return context->cpUpdateReturningSlot;
2537 :
2538 321570 : switch (result)
2539 : {
2540 84 : case TM_SelfModified:
2541 :
2542 : /*
2543 : * The target tuple was already updated or deleted by the
2544 : * current command, or by a later command in the current
2545 : * transaction. The former case is possible in a join UPDATE
2546 : * where multiple tuples join to the same target tuple. This
2547 : * is pretty questionable, but Postgres has always allowed it:
2548 : * we just execute the first update action and ignore
2549 : * additional update attempts.
2550 : *
2551 : * The latter case arises if the tuple is modified by a
2552 : * command in a BEFORE trigger, or perhaps by a command in a
2553 : * volatile function used in the query. In such situations we
2554 : * should not ignore the update, but it is equally unsafe to
2555 : * proceed. We don't want to discard the original UPDATE
2556 : * while keeping the triggered actions based on it; and we
2557 : * have no principled way to merge this update with the
2558 : * previous ones. So throwing an error is the only safe
2559 : * course.
2560 : *
2561 : * If a trigger actually intends this type of interaction, it
2562 : * can re-execute the UPDATE (assuming it can figure out how)
2563 : * and then return NULL to cancel the outer update.
2564 : */
2565 84 : if (context->tmfd.cmax != estate->es_output_cid)
2566 6 : ereport(ERROR,
2567 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2568 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2569 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2570 :
2571 : /* Else, already updated by self; nothing to do */
2572 78 : return NULL;
2573 :
2574 321322 : case TM_Ok:
2575 321322 : break;
2576 :
2577 156 : case TM_Updated:
2578 : {
2579 : TupleTableSlot *inputslot;
2580 : TupleTableSlot *epqslot;
2581 :
2582 156 : if (IsolationUsesXactSnapshot())
2583 4 : ereport(ERROR,
2584 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2585 : errmsg("could not serialize access due to concurrent update")));
2586 :
2587 : /*
2588 : * Already know that we're going to need to do EPQ, so
2589 : * fetch tuple directly into the right slot.
2590 : */
2591 152 : inputslot = EvalPlanQualSlot(context->epqstate, resultRelationDesc,
2592 : resultRelInfo->ri_RangeTableIndex);
2593 :
2594 152 : result = table_tuple_lock(resultRelationDesc, tupleid,
2595 : estate->es_snapshot,
2596 : inputslot, estate->es_output_cid,
2597 : updateCxt.lockmode, LockWaitBlock,
2598 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
2599 : &context->tmfd);
2600 :
2601 148 : switch (result)
2602 : {
2603 138 : case TM_Ok:
2604 : Assert(context->tmfd.traversed);
2605 :
2606 138 : epqslot = EvalPlanQual(context->epqstate,
2607 : resultRelationDesc,
2608 : resultRelInfo->ri_RangeTableIndex,
2609 : inputslot);
2610 138 : if (TupIsNull(epqslot))
2611 : /* Tuple not passing quals anymore, exiting... */
2612 38 : return NULL;
2613 :
2614 : /* Make sure ri_oldTupleSlot is initialized. */
2615 100 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
2616 0 : ExecInitUpdateProjection(context->mtstate,
2617 : resultRelInfo);
2618 :
2619 100 : if (resultRelInfo->ri_needLockTagTuple)
2620 : {
2621 2 : UnlockTuple(resultRelationDesc,
2622 : &lockedtid, InplaceUpdateTupleLock);
2623 2 : LockTuple(resultRelationDesc,
2624 : tupleid, InplaceUpdateTupleLock);
2625 : }
2626 :
2627 : /* Fetch the most recent version of old tuple. */
2628 100 : oldSlot = resultRelInfo->ri_oldTupleSlot;
2629 100 : if (!table_tuple_fetch_row_version(resultRelationDesc,
2630 : tupleid,
2631 : SnapshotAny,
2632 : oldSlot))
2633 0 : elog(ERROR, "failed to fetch tuple being updated");
2634 100 : slot = ExecGetUpdateNewTuple(resultRelInfo,
2635 : epqslot, oldSlot);
2636 100 : goto redo_act;
2637 :
2638 2 : case TM_Deleted:
2639 : /* tuple already deleted; nothing to do */
2640 2 : return NULL;
2641 :
2642 8 : case TM_SelfModified:
2643 :
2644 : /*
2645 : * This can be reached when following an update
2646 : * chain from a tuple updated by another session,
2647 : * reaching a tuple that was already updated in
2648 : * this transaction. If previously modified by
2649 : * this command, ignore the redundant update,
2650 : * otherwise error out.
2651 : *
2652 : * See also TM_SelfModified response to
2653 : * table_tuple_update() above.
2654 : */
2655 8 : if (context->tmfd.cmax != estate->es_output_cid)
2656 2 : ereport(ERROR,
2657 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
2658 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
2659 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
2660 6 : return NULL;
2661 :
2662 0 : default:
2663 : /* see table_tuple_lock call in ExecDelete() */
2664 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
2665 : result);
2666 : return NULL;
2667 : }
2668 : }
2669 :
2670 : break;
2671 :
2672 8 : case TM_Deleted:
2673 8 : if (IsolationUsesXactSnapshot())
2674 0 : ereport(ERROR,
2675 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2676 : errmsg("could not serialize access due to concurrent delete")));
2677 : /* tuple already deleted; nothing to do */
2678 8 : return NULL;
2679 :
2680 0 : default:
2681 0 : elog(ERROR, "unrecognized table_tuple_update status: %u",
2682 : result);
2683 : return NULL;
2684 : }
2685 : }
2686 :
2687 321606 : if (canSetTag)
2688 321008 : (estate->es_processed)++;
2689 :
2690 321606 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
2691 : slot);
2692 :
2693 : /* Process RETURNING if present */
2694 321440 : if (resultRelInfo->ri_projectReturning)
2695 2404 : return ExecProcessReturning(context, resultRelInfo, CMD_UPDATE,
2696 : oldSlot, slot, context->planSlot);
2697 :
2698 319036 : return NULL;
2699 : }
2700 :
2701 : /*
2702 : * ExecOnConflictUpdate --- execute UPDATE of INSERT ON CONFLICT DO UPDATE
2703 : *
2704 : * Try to lock tuple for update as part of speculative insertion. If
2705 : * a qual originating from ON CONFLICT DO UPDATE is satisfied, update
2706 : * (but still lock row, even though it may not satisfy estate's
2707 : * snapshot).
2708 : *
2709 : * Returns true if we're done (with or without an update), or false if
2710 : * the caller must retry the INSERT from scratch.
2711 : */
2712 : static bool
2713 5216 : ExecOnConflictUpdate(ModifyTableContext *context,
2714 : ResultRelInfo *resultRelInfo,
2715 : ItemPointer conflictTid,
2716 : TupleTableSlot *excludedSlot,
2717 : bool canSetTag,
2718 : TupleTableSlot **returning)
2719 : {
2720 5216 : ModifyTableState *mtstate = context->mtstate;
2721 5216 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
2722 5216 : Relation relation = resultRelInfo->ri_RelationDesc;
2723 5216 : ExprState *onConflictSetWhere = resultRelInfo->ri_onConflict->oc_WhereClause;
2724 5216 : TupleTableSlot *existing = resultRelInfo->ri_onConflict->oc_Existing;
2725 : TM_FailureData tmfd;
2726 : LockTupleMode lockmode;
2727 : TM_Result test;
2728 : Datum xminDatum;
2729 : TransactionId xmin;
2730 : bool isnull;
2731 :
2732 : /*
2733 : * Parse analysis should have blocked ON CONFLICT for all system
2734 : * relations, which includes these. There's no fundamental obstacle to
2735 : * supporting this; we'd just need to handle LOCKTAG_TUPLE like the other
2736 : * ExecUpdate() caller.
2737 : */
2738 : Assert(!resultRelInfo->ri_needLockTagTuple);
2739 :
2740 : /* Determine lock mode to use */
2741 5216 : lockmode = ExecUpdateLockMode(context->estate, resultRelInfo);
2742 :
2743 : /*
2744 : * Lock tuple for update. Don't follow updates when tuple cannot be
2745 : * locked without doing so. A row locking conflict here means our
2746 : * previous conclusion that the tuple is conclusively committed is not
2747 : * true anymore.
2748 : */
2749 5216 : test = table_tuple_lock(relation, conflictTid,
2750 5216 : context->estate->es_snapshot,
2751 5216 : existing, context->estate->es_output_cid,
2752 : lockmode, LockWaitBlock, 0,
2753 : &tmfd);
2754 5216 : switch (test)
2755 : {
2756 5192 : case TM_Ok:
2757 : /* success! */
2758 5192 : break;
2759 :
2760 24 : case TM_Invisible:
2761 :
2762 : /*
2763 : * This can occur when a just inserted tuple is updated again in
2764 : * the same command. E.g. because multiple rows with the same
2765 : * conflicting key values are inserted.
2766 : *
2767 : * This is somewhat similar to the ExecUpdate() TM_SelfModified
2768 : * case. We do not want to proceed because it would lead to the
2769 : * same row being updated a second time in some unspecified order,
2770 : * and in contrast to plain UPDATEs there's no historical behavior
2771 : * to break.
2772 : *
2773 : * It is the user's responsibility to prevent this situation from
2774 : * occurring. These problems are why the SQL standard similarly
2775 : * specifies that for SQL MERGE, an exception must be raised in
2776 : * the event of an attempt to update the same row twice.
2777 : */
2778 24 : xminDatum = slot_getsysattr(existing,
2779 : MinTransactionIdAttributeNumber,
2780 : &isnull);
2781 : Assert(!isnull);
2782 24 : xmin = DatumGetTransactionId(xminDatum);
2783 :
2784 24 : if (TransactionIdIsCurrentTransactionId(xmin))
2785 24 : ereport(ERROR,
2786 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
2787 : /* translator: %s is a SQL command name */
2788 : errmsg("%s command cannot affect row a second time",
2789 : "ON CONFLICT DO UPDATE"),
2790 : errhint("Ensure that no rows proposed for insertion within the same command have duplicate constrained values.")));
2791 :
2792 : /* This shouldn't happen */
2793 0 : elog(ERROR, "attempted to lock invisible tuple");
2794 : break;
2795 :
2796 0 : case TM_SelfModified:
2797 :
2798 : /*
2799 : * This state should never be reached. As a dirty snapshot is used
2800 : * to find conflicting tuples, speculative insertion wouldn't have
2801 : * seen this row to conflict with.
2802 : */
2803 0 : elog(ERROR, "unexpected self-updated tuple");
2804 : break;
2805 :
2806 0 : case TM_Updated:
2807 0 : if (IsolationUsesXactSnapshot())
2808 0 : ereport(ERROR,
2809 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2810 : errmsg("could not serialize access due to concurrent update")));
2811 :
2812 : /*
2813 : * As long as we don't support an UPDATE of INSERT ON CONFLICT for
2814 : * a partitioned table we shouldn't reach to a case where tuple to
2815 : * be lock is moved to another partition due to concurrent update
2816 : * of the partition key.
2817 : */
2818 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2819 :
2820 : /*
2821 : * Tell caller to try again from the very start.
2822 : *
2823 : * It does not make sense to use the usual EvalPlanQual() style
2824 : * loop here, as the new version of the row might not conflict
2825 : * anymore, or the conflicting tuple has actually been deleted.
2826 : */
2827 0 : ExecClearTuple(existing);
2828 0 : return false;
2829 :
2830 0 : case TM_Deleted:
2831 0 : if (IsolationUsesXactSnapshot())
2832 0 : ereport(ERROR,
2833 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
2834 : errmsg("could not serialize access due to concurrent delete")));
2835 :
2836 : /* see TM_Updated case */
2837 : Assert(!ItemPointerIndicatesMovedPartitions(&tmfd.ctid));
2838 0 : ExecClearTuple(existing);
2839 0 : return false;
2840 :
2841 0 : default:
2842 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
2843 : }
2844 :
2845 : /* Success, the tuple is locked. */
2846 :
2847 : /*
2848 : * Verify that the tuple is visible to our MVCC snapshot if the current
2849 : * isolation level mandates that.
2850 : *
2851 : * It's not sufficient to rely on the check within ExecUpdate() as e.g.
2852 : * CONFLICT ... WHERE clause may prevent us from reaching that.
2853 : *
2854 : * This means we only ever continue when a new command in the current
2855 : * transaction could see the row, even though in READ COMMITTED mode the
2856 : * tuple will not be visible according to the current statement's
2857 : * snapshot. This is in line with the way UPDATE deals with newer tuple
2858 : * versions.
2859 : */
2860 5192 : ExecCheckTupleVisible(context->estate, relation, existing);
2861 :
2862 : /*
2863 : * Make tuple and any needed join variables available to ExecQual and
2864 : * ExecProject. The EXCLUDED tuple is installed in ecxt_innertuple, while
2865 : * the target's existing tuple is installed in the scantuple. EXCLUDED
2866 : * has been made to reference INNER_VAR in setrefs.c, but there is no
2867 : * other redirection.
2868 : */
2869 5192 : econtext->ecxt_scantuple = existing;
2870 5192 : econtext->ecxt_innertuple = excludedSlot;
2871 5192 : econtext->ecxt_outertuple = NULL;
2872 :
2873 5192 : if (!ExecQual(onConflictSetWhere, econtext))
2874 : {
2875 32 : ExecClearTuple(existing); /* see return below */
2876 32 : InstrCountFiltered1(&mtstate->ps, 1);
2877 32 : return true; /* done with the tuple */
2878 : }
2879 :
2880 5160 : if (resultRelInfo->ri_WithCheckOptions != NIL)
2881 : {
2882 : /*
2883 : * Check target's existing tuple against UPDATE-applicable USING
2884 : * security barrier quals (if any), enforced here as RLS checks/WCOs.
2885 : *
2886 : * The rewriter creates UPDATE RLS checks/WCOs for UPDATE security
2887 : * quals, and stores them as WCOs of "kind" WCO_RLS_CONFLICT_CHECK,
2888 : * but that's almost the extent of its special handling for ON
2889 : * CONFLICT DO UPDATE.
2890 : *
2891 : * The rewriter will also have associated UPDATE applicable straight
2892 : * RLS checks/WCOs for the benefit of the ExecUpdate() call that
2893 : * follows. INSERTs and UPDATEs naturally have mutually exclusive WCO
2894 : * kinds, so there is no danger of spurious over-enforcement in the
2895 : * INSERT or UPDATE path.
2896 : */
2897 72 : ExecWithCheckOptions(WCO_RLS_CONFLICT_CHECK, resultRelInfo,
2898 : existing,
2899 : mtstate->ps.state);
2900 : }
2901 :
2902 : /* Project the new tuple version */
2903 5136 : ExecProject(resultRelInfo->ri_onConflict->oc_ProjInfo);
2904 :
2905 : /*
2906 : * Note that it is possible that the target tuple has been modified in
2907 : * this session, after the above table_tuple_lock. We choose to not error
2908 : * out in that case, in line with ExecUpdate's treatment of similar cases.
2909 : * This can happen if an UPDATE is triggered from within ExecQual(),
2910 : * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
2911 : * wCTE in the ON CONFLICT's SET.
2912 : */
2913 :
2914 : /* Execute UPDATE with projection */
2915 10242 : *returning = ExecUpdate(context, resultRelInfo,
2916 : conflictTid, NULL, existing,
2917 5136 : resultRelInfo->ri_onConflict->oc_ProjSlot,
2918 : canSetTag);
2919 :
2920 : /*
2921 : * Clear out existing tuple, as there might not be another conflict among
2922 : * the next input rows. Don't want to hold resources till the end of the
2923 : * query. First though, make sure that the returning slot, if any, has a
2924 : * local copy of any OLD pass-by-reference values, if it refers to any OLD
2925 : * columns.
2926 : */
2927 5106 : if (*returning != NULL &&
2928 226 : resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD)
2929 6 : ExecMaterializeSlot(*returning);
2930 :
2931 5106 : ExecClearTuple(existing);
2932 :
2933 5106 : return true;
2934 : }
2935 :
2936 : /*
2937 : * Perform MERGE.
2938 : */
2939 : static TupleTableSlot *
2940 14950 : ExecMerge(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
2941 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag)
2942 : {
2943 14950 : TupleTableSlot *rslot = NULL;
2944 : bool matched;
2945 :
2946 : /*-----
2947 : * If we are dealing with a WHEN MATCHED case, tupleid or oldtuple is
2948 : * valid, depending on whether the result relation is a table or a view.
2949 : * We execute the first action for which the additional WHEN MATCHED AND
2950 : * quals pass. If an action without quals is found, that action is
2951 : * executed.
2952 : *
2953 : * Similarly, in the WHEN NOT MATCHED BY SOURCE case, tupleid or oldtuple
2954 : * is valid, and we look at the given WHEN NOT MATCHED BY SOURCE actions
2955 : * in sequence until one passes. This is almost identical to the WHEN
2956 : * MATCHED case, and both cases are handled by ExecMergeMatched().
2957 : *
2958 : * Finally, in the WHEN NOT MATCHED [BY TARGET] case, both tupleid and
2959 : * oldtuple are invalid, and we look at the given WHEN NOT MATCHED [BY
2960 : * TARGET] actions in sequence until one passes.
2961 : *
2962 : * Things get interesting in case of concurrent update/delete of the
2963 : * target tuple. Such concurrent update/delete is detected while we are
2964 : * executing a WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action.
2965 : *
2966 : * A concurrent update can:
2967 : *
2968 : * 1. modify the target tuple so that the results from checking any
2969 : * additional quals attached to WHEN MATCHED or WHEN NOT MATCHED BY
2970 : * SOURCE actions potentially change, but the result from the join
2971 : * quals does not change.
2972 : *
2973 : * In this case, we are still dealing with the same kind of match
2974 : * (MATCHED or NOT MATCHED BY SOURCE). We recheck the same list of
2975 : * actions from the start and choose the first one that satisfies the
2976 : * new target tuple.
2977 : *
2978 : * 2. modify the target tuple in the WHEN MATCHED case so that the join
2979 : * quals no longer pass and hence the source and target tuples no
2980 : * longer match.
2981 : *
2982 : * In this case, we are now dealing with a NOT MATCHED case, and we
2983 : * process both WHEN NOT MATCHED BY SOURCE and WHEN NOT MATCHED [BY
2984 : * TARGET] actions. First ExecMergeMatched() processes the list of
2985 : * WHEN NOT MATCHED BY SOURCE actions in sequence until one passes,
2986 : * then ExecMergeNotMatched() processes any WHEN NOT MATCHED [BY
2987 : * TARGET] actions in sequence until one passes. Thus we may execute
2988 : * two actions; one of each kind.
2989 : *
2990 : * Thus we support concurrent updates that turn MATCHED candidate rows
2991 : * into NOT MATCHED rows. However, we do not attempt to support cases
2992 : * that would turn NOT MATCHED rows into MATCHED rows, or which would
2993 : * cause a target row to match a different source row.
2994 : *
2995 : * A concurrent delete changes a WHEN MATCHED case to WHEN NOT MATCHED
2996 : * [BY TARGET].
2997 : *
2998 : * ExecMergeMatched() takes care of following the update chain and
2999 : * re-finding the qualifying WHEN MATCHED or WHEN NOT MATCHED BY SOURCE
3000 : * action, as long as the target tuple still exists. If the target tuple
3001 : * gets deleted or a concurrent update causes the join quals to fail, it
3002 : * returns a matched status of false and we call ExecMergeNotMatched().
3003 : * Given that ExecMergeMatched() always makes progress by following the
3004 : * update chain and we never switch from ExecMergeNotMatched() to
3005 : * ExecMergeMatched(), there is no risk of a livelock.
3006 : */
3007 14950 : matched = tupleid != NULL || oldtuple != NULL;
3008 14950 : if (matched)
3009 12264 : rslot = ExecMergeMatched(context, resultRelInfo, tupleid, oldtuple,
3010 : canSetTag, &matched);
3011 :
3012 : /*
3013 : * Deal with the NOT MATCHED case (either a NOT MATCHED tuple from the
3014 : * join, or a previously MATCHED tuple for which ExecMergeMatched() set
3015 : * "matched" to false, indicating that it no longer matches).
3016 : */
3017 14856 : if (!matched)
3018 : {
3019 : /*
3020 : * If a concurrent update turned a MATCHED case into a NOT MATCHED
3021 : * case, and we have both WHEN NOT MATCHED BY SOURCE and WHEN NOT
3022 : * MATCHED [BY TARGET] actions, and there is a RETURNING clause,
3023 : * ExecMergeMatched() may have already executed a WHEN NOT MATCHED BY
3024 : * SOURCE action, and computed the row to return. If so, we cannot
3025 : * execute a WHEN NOT MATCHED [BY TARGET] action now, so mark it as
3026 : * pending (to be processed on the next call to ExecModifyTable()).
3027 : * Otherwise, just process the action now.
3028 : */
3029 2704 : if (rslot == NULL)
3030 2700 : rslot = ExecMergeNotMatched(context, resultRelInfo, canSetTag);
3031 : else
3032 4 : context->mtstate->mt_merge_pending_not_matched = context->planSlot;
3033 : }
3034 :
3035 14796 : return rslot;
3036 : }
3037 :
3038 : /*
3039 : * Check and execute the first qualifying MATCHED or NOT MATCHED BY SOURCE
3040 : * action, depending on whether the join quals are satisfied. If the target
3041 : * relation is a table, the current target tuple is identified by tupleid.
3042 : * Otherwise, if the target relation is a view, oldtuple is the current target
3043 : * tuple from the view.
3044 : *
3045 : * We start from the first WHEN MATCHED or WHEN NOT MATCHED BY SOURCE action
3046 : * and check if the WHEN quals pass, if any. If the WHEN quals for the first
3047 : * action do not pass, we check the second, then the third and so on. If we
3048 : * reach the end without finding a qualifying action, we return NULL.
3049 : * Otherwise, we execute the qualifying action and return its RETURNING
3050 : * result, if any, or NULL.
3051 : *
3052 : * On entry, "*matched" is assumed to be true. If a concurrent update or
3053 : * delete is detected that causes the join quals to no longer pass, we set it
3054 : * to false, indicating that the caller should process any NOT MATCHED [BY
3055 : * TARGET] actions.
3056 : *
3057 : * After a concurrent update, we restart from the first action to look for a
3058 : * new qualifying action to execute. If the join quals originally passed, and
3059 : * the concurrent update caused them to no longer pass, then we switch from
3060 : * the MATCHED to the NOT MATCHED BY SOURCE list of actions before restarting
3061 : * (and setting "*matched" to false). As a result we may execute a WHEN NOT
3062 : * MATCHED BY SOURCE action, and set "*matched" to false, causing the caller
3063 : * to also execute a WHEN NOT MATCHED [BY TARGET] action.
3064 : */
3065 : static TupleTableSlot *
3066 12264 : ExecMergeMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3067 : ItemPointer tupleid, HeapTuple oldtuple, bool canSetTag,
3068 : bool *matched)
3069 : {
3070 12264 : ModifyTableState *mtstate = context->mtstate;
3071 12264 : List **mergeActions = resultRelInfo->ri_MergeActions;
3072 : ItemPointerData lockedtid;
3073 : List *actionStates;
3074 12264 : TupleTableSlot *newslot = NULL;
3075 12264 : TupleTableSlot *rslot = NULL;
3076 12264 : EState *estate = context->estate;
3077 12264 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3078 : bool isNull;
3079 12264 : EPQState *epqstate = &mtstate->mt_epqstate;
3080 : ListCell *l;
3081 :
3082 : /* Expect matched to be true on entry */
3083 : Assert(*matched);
3084 :
3085 : /*
3086 : * If there are no WHEN MATCHED or WHEN NOT MATCHED BY SOURCE actions, we
3087 : * are done.
3088 : */
3089 12264 : if (mergeActions[MERGE_WHEN_MATCHED] == NIL &&
3090 1206 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] == NIL)
3091 534 : return NULL;
3092 :
3093 : /*
3094 : * Make tuple and any needed join variables available to ExecQual and
3095 : * ExecProject. The target's existing tuple is installed in the scantuple.
3096 : * This target relation's slot is required only in the case of a MATCHED
3097 : * or NOT MATCHED BY SOURCE tuple and UPDATE/DELETE actions.
3098 : */
3099 11730 : econtext->ecxt_scantuple = resultRelInfo->ri_oldTupleSlot;
3100 11730 : econtext->ecxt_innertuple = context->planSlot;
3101 11730 : econtext->ecxt_outertuple = NULL;
3102 :
3103 : /*
3104 : * This routine is only invoked for matched target rows, so we should
3105 : * either have the tupleid of the target row, or an old tuple from the
3106 : * target wholerow junk attr.
3107 : */
3108 : Assert(tupleid != NULL || oldtuple != NULL);
3109 11730 : ItemPointerSetInvalid(&lockedtid);
3110 11730 : if (oldtuple != NULL)
3111 : {
3112 : Assert(!resultRelInfo->ri_needLockTagTuple);
3113 96 : ExecForceStoreHeapTuple(oldtuple, resultRelInfo->ri_oldTupleSlot,
3114 : false);
3115 : }
3116 : else
3117 : {
3118 11634 : if (resultRelInfo->ri_needLockTagTuple)
3119 : {
3120 : /*
3121 : * This locks even for CMD_DELETE, for CMD_NOTHING, and for tuples
3122 : * that don't match mas_whenqual. MERGE on system catalogs is a
3123 : * minor use case, so don't bother optimizing those.
3124 : */
3125 7654 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3126 : InplaceUpdateTupleLock);
3127 7654 : lockedtid = *tupleid;
3128 : }
3129 11634 : if (!table_tuple_fetch_row_version(resultRelInfo->ri_RelationDesc,
3130 : tupleid,
3131 : SnapshotAny,
3132 : resultRelInfo->ri_oldTupleSlot))
3133 0 : elog(ERROR, "failed to fetch the target tuple");
3134 : }
3135 :
3136 : /*
3137 : * Test the join condition. If it's satisfied, perform a MATCHED action.
3138 : * Otherwise, perform a NOT MATCHED BY SOURCE action.
3139 : *
3140 : * Note that this join condition will be NULL if there are no NOT MATCHED
3141 : * BY SOURCE actions --- see transform_MERGE_to_join(). In that case, we
3142 : * need only consider MATCHED actions here.
3143 : */
3144 11730 : if (ExecQual(resultRelInfo->ri_MergeJoinCondition, econtext))
3145 11544 : actionStates = mergeActions[MERGE_WHEN_MATCHED];
3146 : else
3147 186 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3148 :
3149 11730 : lmerge_matched:
3150 :
3151 20928 : foreach(l, actionStates)
3152 : {
3153 11886 : MergeActionState *relaction = (MergeActionState *) lfirst(l);
3154 11886 : CmdType commandType = relaction->mas_action->commandType;
3155 : TM_Result result;
3156 11886 : UpdateContext updateCxt = {0};
3157 :
3158 : /*
3159 : * Test condition, if any.
3160 : *
3161 : * In the absence of any condition, we perform the action
3162 : * unconditionally (no need to check separately since ExecQual() will
3163 : * return true if there are no conditions to evaluate).
3164 : */
3165 11886 : if (!ExecQual(relaction->mas_whenqual, econtext))
3166 9122 : continue;
3167 :
3168 : /*
3169 : * Check if the existing target tuple meets the USING checks of
3170 : * UPDATE/DELETE RLS policies. If those checks fail, we throw an
3171 : * error.
3172 : *
3173 : * The WITH CHECK quals for UPDATE RLS policies are applied in
3174 : * ExecUpdateAct() and hence we need not do anything special to handle
3175 : * them.
3176 : *
3177 : * NOTE: We must do this after WHEN quals are evaluated, so that we
3178 : * check policies only when they matter.
3179 : */
3180 2764 : if (resultRelInfo->ri_WithCheckOptions && commandType != CMD_NOTHING)
3181 : {
3182 114 : ExecWithCheckOptions(commandType == CMD_UPDATE ?
3183 : WCO_RLS_MERGE_UPDATE_CHECK : WCO_RLS_MERGE_DELETE_CHECK,
3184 : resultRelInfo,
3185 : resultRelInfo->ri_oldTupleSlot,
3186 114 : context->mtstate->ps.state);
3187 : }
3188 :
3189 : /* Perform stated action */
3190 2740 : switch (commandType)
3191 : {
3192 2184 : case CMD_UPDATE:
3193 :
3194 : /*
3195 : * Project the output tuple, and use that to update the table.
3196 : * We don't need to filter out junk attributes, because the
3197 : * UPDATE action's targetlist doesn't have any.
3198 : */
3199 2184 : newslot = ExecProject(relaction->mas_proj);
3200 :
3201 2184 : mtstate->mt_merge_action = relaction;
3202 2184 : if (!ExecUpdatePrologue(context, resultRelInfo,
3203 : tupleid, NULL, newslot, &result))
3204 : {
3205 20 : if (result == TM_Ok)
3206 160 : goto out; /* "do nothing" */
3207 :
3208 14 : break; /* concurrent update/delete */
3209 : }
3210 :
3211 : /* INSTEAD OF ROW UPDATE Triggers */
3212 2164 : if (resultRelInfo->ri_TrigDesc &&
3213 348 : resultRelInfo->ri_TrigDesc->trig_update_instead_row)
3214 : {
3215 78 : if (!ExecIRUpdateTriggers(estate, resultRelInfo,
3216 : oldtuple, newslot))
3217 0 : goto out; /* "do nothing" */
3218 : }
3219 : else
3220 : {
3221 : /* checked ri_needLockTagTuple above */
3222 : Assert(oldtuple == NULL);
3223 :
3224 2086 : result = ExecUpdateAct(context, resultRelInfo, tupleid,
3225 : NULL, newslot, canSetTag,
3226 : &updateCxt);
3227 :
3228 : /*
3229 : * As in ExecUpdate(), if ExecUpdateAct() reports that a
3230 : * cross-partition update was done, then there's nothing
3231 : * else for us to do --- the UPDATE has been turned into a
3232 : * DELETE and an INSERT, and we must not perform any of
3233 : * the usual post-update tasks. Also, the RETURNING tuple
3234 : * (if any) has been projected, so we can just return
3235 : * that.
3236 : */
3237 2062 : if (updateCxt.crossPartUpdate)
3238 : {
3239 138 : mtstate->mt_merge_updated += 1;
3240 138 : rslot = context->cpUpdateReturningSlot;
3241 138 : goto out;
3242 : }
3243 : }
3244 :
3245 2002 : if (result == TM_Ok)
3246 : {
3247 1920 : ExecUpdateEpilogue(context, &updateCxt, resultRelInfo,
3248 : tupleid, NULL, newslot);
3249 1908 : mtstate->mt_merge_updated += 1;
3250 : }
3251 1990 : break;
3252 :
3253 526 : case CMD_DELETE:
3254 526 : mtstate->mt_merge_action = relaction;
3255 526 : if (!ExecDeletePrologue(context, resultRelInfo, tupleid,
3256 : NULL, NULL, &result))
3257 : {
3258 12 : if (result == TM_Ok)
3259 6 : goto out; /* "do nothing" */
3260 :
3261 6 : break; /* concurrent update/delete */
3262 : }
3263 :
3264 : /* INSTEAD OF ROW DELETE Triggers */
3265 514 : if (resultRelInfo->ri_TrigDesc &&
3266 56 : resultRelInfo->ri_TrigDesc->trig_delete_instead_row)
3267 : {
3268 6 : if (!ExecIRDeleteTriggers(estate, resultRelInfo,
3269 : oldtuple))
3270 0 : goto out; /* "do nothing" */
3271 : }
3272 : else
3273 : {
3274 : /* checked ri_needLockTagTuple above */
3275 : Assert(oldtuple == NULL);
3276 :
3277 508 : result = ExecDeleteAct(context, resultRelInfo, tupleid,
3278 : false);
3279 : }
3280 :
3281 514 : if (result == TM_Ok)
3282 : {
3283 496 : ExecDeleteEpilogue(context, resultRelInfo, tupleid, NULL,
3284 : false);
3285 496 : mtstate->mt_merge_deleted += 1;
3286 : }
3287 514 : break;
3288 :
3289 30 : case CMD_NOTHING:
3290 : /* Doing nothing is always OK */
3291 30 : result = TM_Ok;
3292 30 : break;
3293 :
3294 0 : default:
3295 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3296 : }
3297 :
3298 2554 : switch (result)
3299 : {
3300 2434 : case TM_Ok:
3301 : /* all good; perform final actions */
3302 2434 : if (canSetTag && commandType != CMD_NOTHING)
3303 2382 : (estate->es_processed)++;
3304 :
3305 2434 : break;
3306 :
3307 32 : case TM_SelfModified:
3308 :
3309 : /*
3310 : * The target tuple was already updated or deleted by the
3311 : * current command, or by a later command in the current
3312 : * transaction. The former case is explicitly disallowed by
3313 : * the SQL standard for MERGE, which insists that the MERGE
3314 : * join condition should not join a target row to more than
3315 : * one source row.
3316 : *
3317 : * The latter case arises if the tuple is modified by a
3318 : * command in a BEFORE trigger, or perhaps by a command in a
3319 : * volatile function used in the query. In such situations we
3320 : * should not ignore the MERGE action, but it is equally
3321 : * unsafe to proceed. We don't want to discard the original
3322 : * MERGE action while keeping the triggered actions based on
3323 : * it; and it would be no better to allow the original MERGE
3324 : * action while discarding the updates that it triggered. So
3325 : * throwing an error is the only safe course.
3326 : */
3327 32 : if (context->tmfd.cmax != estate->es_output_cid)
3328 12 : ereport(ERROR,
3329 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3330 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3331 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3332 :
3333 20 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3334 20 : ereport(ERROR,
3335 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3336 : /* translator: %s is a SQL command name */
3337 : errmsg("%s command cannot affect row a second time",
3338 : "MERGE"),
3339 : errhint("Ensure that not more than one source row matches any one target row.")));
3340 :
3341 : /* This shouldn't happen */
3342 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3343 : break;
3344 :
3345 10 : case TM_Deleted:
3346 10 : if (IsolationUsesXactSnapshot())
3347 0 : ereport(ERROR,
3348 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3349 : errmsg("could not serialize access due to concurrent delete")));
3350 :
3351 : /*
3352 : * If the tuple was already deleted, set matched to false to
3353 : * let caller handle it under NOT MATCHED [BY TARGET] clauses.
3354 : */
3355 10 : *matched = false;
3356 10 : goto out;
3357 :
3358 78 : case TM_Updated:
3359 : {
3360 : bool was_matched;
3361 : Relation resultRelationDesc;
3362 : TupleTableSlot *epqslot,
3363 : *inputslot;
3364 : LockTupleMode lockmode;
3365 :
3366 : /*
3367 : * The target tuple was concurrently updated by some other
3368 : * transaction. If we are currently processing a MATCHED
3369 : * action, use EvalPlanQual() with the new version of the
3370 : * tuple and recheck the join qual, to detect a change
3371 : * from the MATCHED to the NOT MATCHED cases. If we are
3372 : * already processing a NOT MATCHED BY SOURCE action, we
3373 : * skip this (cannot switch from NOT MATCHED BY SOURCE to
3374 : * MATCHED).
3375 : */
3376 78 : was_matched = relaction->mas_action->matchKind == MERGE_WHEN_MATCHED;
3377 78 : resultRelationDesc = resultRelInfo->ri_RelationDesc;
3378 78 : lockmode = ExecUpdateLockMode(estate, resultRelInfo);
3379 :
3380 78 : if (was_matched)
3381 78 : inputslot = EvalPlanQualSlot(epqstate, resultRelationDesc,
3382 : resultRelInfo->ri_RangeTableIndex);
3383 : else
3384 0 : inputslot = resultRelInfo->ri_oldTupleSlot;
3385 :
3386 78 : result = table_tuple_lock(resultRelationDesc, tupleid,
3387 : estate->es_snapshot,
3388 : inputslot, estate->es_output_cid,
3389 : lockmode, LockWaitBlock,
3390 : TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
3391 : &context->tmfd);
3392 78 : switch (result)
3393 : {
3394 76 : case TM_Ok:
3395 :
3396 : /*
3397 : * If the tuple was updated and migrated to
3398 : * another partition concurrently, the current
3399 : * MERGE implementation can't follow. There's
3400 : * probably a better way to handle this case, but
3401 : * it'd require recognizing the relation to which
3402 : * the tuple moved, and setting our current
3403 : * resultRelInfo to that.
3404 : */
3405 76 : if (ItemPointerIndicatesMovedPartitions(tupleid))
3406 0 : ereport(ERROR,
3407 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3408 : errmsg("tuple to be merged was already moved to another partition due to concurrent update")));
3409 :
3410 : /*
3411 : * If this was a MATCHED case, use EvalPlanQual()
3412 : * to recheck the join condition.
3413 : */
3414 76 : if (was_matched)
3415 : {
3416 76 : epqslot = EvalPlanQual(epqstate,
3417 : resultRelationDesc,
3418 : resultRelInfo->ri_RangeTableIndex,
3419 : inputslot);
3420 :
3421 : /*
3422 : * If the subplan didn't return a tuple, then
3423 : * we must be dealing with an inner join for
3424 : * which the join condition no longer matches.
3425 : * This can only happen if there are no NOT
3426 : * MATCHED actions, and so there is nothing
3427 : * more to do.
3428 : */
3429 76 : if (TupIsNull(epqslot))
3430 0 : goto out;
3431 :
3432 : /*
3433 : * If we got a NULL ctid from the subplan, the
3434 : * join quals no longer pass and we switch to
3435 : * the NOT MATCHED BY SOURCE case.
3436 : */
3437 76 : (void) ExecGetJunkAttribute(epqslot,
3438 76 : resultRelInfo->ri_RowIdAttNo,
3439 : &isNull);
3440 76 : if (isNull)
3441 4 : *matched = false;
3442 :
3443 : /*
3444 : * Otherwise, recheck the join quals to see if
3445 : * we need to switch to the NOT MATCHED BY
3446 : * SOURCE case.
3447 : */
3448 76 : if (resultRelInfo->ri_needLockTagTuple)
3449 : {
3450 2 : if (ItemPointerIsValid(&lockedtid))
3451 2 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3452 : InplaceUpdateTupleLock);
3453 2 : LockTuple(resultRelInfo->ri_RelationDesc, tupleid,
3454 : InplaceUpdateTupleLock);
3455 2 : lockedtid = *tupleid;
3456 : }
3457 :
3458 76 : if (!table_tuple_fetch_row_version(resultRelationDesc,
3459 : tupleid,
3460 : SnapshotAny,
3461 : resultRelInfo->ri_oldTupleSlot))
3462 0 : elog(ERROR, "failed to fetch the target tuple");
3463 :
3464 76 : if (*matched)
3465 72 : *matched = ExecQual(resultRelInfo->ri_MergeJoinCondition,
3466 : econtext);
3467 :
3468 : /* Switch lists, if necessary */
3469 76 : if (!*matched)
3470 : {
3471 8 : actionStates = mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE];
3472 :
3473 : /*
3474 : * If we have both NOT MATCHED BY SOURCE
3475 : * and NOT MATCHED BY TARGET actions (a
3476 : * full join between the source and target
3477 : * relations), the single previously
3478 : * matched tuple from the outer plan node
3479 : * is treated as two not matched tuples,
3480 : * in the same way as if they had not
3481 : * matched to start with. Therefore, we
3482 : * must adjust the outer plan node's tuple
3483 : * count, if we're instrumenting the
3484 : * query, to get the correct "skipped" row
3485 : * count --- see show_modifytable_info().
3486 : */
3487 8 : if (outerPlanState(mtstate)->instrument &&
3488 2 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] &&
3489 2 : mergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET])
3490 2 : InstrUpdateTupleCount(outerPlanState(mtstate)->instrument, 1.0);
3491 : }
3492 : }
3493 :
3494 : /*
3495 : * Loop back and process the MATCHED or NOT
3496 : * MATCHED BY SOURCE actions from the start.
3497 : */
3498 76 : goto lmerge_matched;
3499 :
3500 0 : case TM_Deleted:
3501 :
3502 : /*
3503 : * tuple already deleted; tell caller to run NOT
3504 : * MATCHED [BY TARGET] actions
3505 : */
3506 0 : *matched = false;
3507 0 : goto out;
3508 :
3509 2 : case TM_SelfModified:
3510 :
3511 : /*
3512 : * This can be reached when following an update
3513 : * chain from a tuple updated by another session,
3514 : * reaching a tuple that was already updated or
3515 : * deleted by the current command, or by a later
3516 : * command in the current transaction. As above,
3517 : * this should always be treated as an error.
3518 : */
3519 2 : if (context->tmfd.cmax != estate->es_output_cid)
3520 0 : ereport(ERROR,
3521 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3522 : errmsg("tuple to be updated or deleted was already modified by an operation triggered by the current command"),
3523 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3524 :
3525 2 : if (TransactionIdIsCurrentTransactionId(context->tmfd.xmax))
3526 2 : ereport(ERROR,
3527 : (errcode(ERRCODE_CARDINALITY_VIOLATION),
3528 : /* translator: %s is a SQL command name */
3529 : errmsg("%s command cannot affect row a second time",
3530 : "MERGE"),
3531 : errhint("Ensure that not more than one source row matches any one target row.")));
3532 :
3533 : /* This shouldn't happen */
3534 0 : elog(ERROR, "attempted to update or delete invisible tuple");
3535 : goto out;
3536 :
3537 0 : default:
3538 : /* see table_tuple_lock call in ExecDelete() */
3539 0 : elog(ERROR, "unexpected table_tuple_lock status: %u",
3540 : result);
3541 : goto out;
3542 : }
3543 : }
3544 :
3545 0 : case TM_Invisible:
3546 : case TM_WouldBlock:
3547 : case TM_BeingModified:
3548 : /* these should not occur */
3549 0 : elog(ERROR, "unexpected tuple operation result: %d", result);
3550 : break;
3551 : }
3552 :
3553 : /* Process RETURNING if present */
3554 2434 : if (resultRelInfo->ri_projectReturning)
3555 : {
3556 428 : switch (commandType)
3557 : {
3558 188 : case CMD_UPDATE:
3559 188 : rslot = ExecProcessReturning(context,
3560 : resultRelInfo,
3561 : CMD_UPDATE,
3562 : resultRelInfo->ri_oldTupleSlot,
3563 : newslot,
3564 : context->planSlot);
3565 188 : break;
3566 :
3567 240 : case CMD_DELETE:
3568 240 : rslot = ExecProcessReturning(context,
3569 : resultRelInfo,
3570 : CMD_DELETE,
3571 : resultRelInfo->ri_oldTupleSlot,
3572 : NULL,
3573 : context->planSlot);
3574 240 : break;
3575 :
3576 0 : case CMD_NOTHING:
3577 0 : break;
3578 :
3579 0 : default:
3580 0 : elog(ERROR, "unrecognized commandType: %d",
3581 : (int) commandType);
3582 : }
3583 : }
3584 :
3585 : /*
3586 : * We've activated one of the WHEN clauses, so we don't search
3587 : * further. This is required behaviour, not an optimization.
3588 : */
3589 2434 : break;
3590 : }
3591 :
3592 : /*
3593 : * Successfully executed an action or no qualifying action was found.
3594 : */
3595 11636 : out:
3596 11636 : if (ItemPointerIsValid(&lockedtid))
3597 7654 : UnlockTuple(resultRelInfo->ri_RelationDesc, &lockedtid,
3598 : InplaceUpdateTupleLock);
3599 11636 : return rslot;
3600 : }
3601 :
3602 : /*
3603 : * Execute the first qualifying NOT MATCHED [BY TARGET] action.
3604 : */
3605 : static TupleTableSlot *
3606 2704 : ExecMergeNotMatched(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
3607 : bool canSetTag)
3608 : {
3609 2704 : ModifyTableState *mtstate = context->mtstate;
3610 2704 : ExprContext *econtext = mtstate->ps.ps_ExprContext;
3611 : List *actionStates;
3612 2704 : TupleTableSlot *rslot = NULL;
3613 : ListCell *l;
3614 :
3615 : /*
3616 : * For INSERT actions, the root relation's merge action is OK since the
3617 : * INSERT's targetlist and the WHEN conditions can only refer to the
3618 : * source relation and hence it does not matter which result relation we
3619 : * work with.
3620 : *
3621 : * XXX does this mean that we can avoid creating copies of actionStates on
3622 : * partitioned tables, for not-matched actions?
3623 : */
3624 2704 : actionStates = resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET];
3625 :
3626 : /*
3627 : * Make source tuple available to ExecQual and ExecProject. We don't need
3628 : * the target tuple, since the WHEN quals and targetlist can't refer to
3629 : * the target columns.
3630 : */
3631 2704 : econtext->ecxt_scantuple = NULL;
3632 2704 : econtext->ecxt_innertuple = context->planSlot;
3633 2704 : econtext->ecxt_outertuple = NULL;
3634 :
3635 3574 : foreach(l, actionStates)
3636 : {
3637 2704 : MergeActionState *action = (MergeActionState *) lfirst(l);
3638 2704 : CmdType commandType = action->mas_action->commandType;
3639 : TupleTableSlot *newslot;
3640 :
3641 : /*
3642 : * Test condition, if any.
3643 : *
3644 : * In the absence of any condition, we perform the action
3645 : * unconditionally (no need to check separately since ExecQual() will
3646 : * return true if there are no conditions to evaluate).
3647 : */
3648 2704 : if (!ExecQual(action->mas_whenqual, econtext))
3649 870 : continue;
3650 :
3651 : /* Perform stated action */
3652 1834 : switch (commandType)
3653 : {
3654 1834 : case CMD_INSERT:
3655 :
3656 : /*
3657 : * Project the tuple. In case of a partitioned table, the
3658 : * projection was already built to use the root's descriptor,
3659 : * so we don't need to map the tuple here.
3660 : */
3661 1834 : newslot = ExecProject(action->mas_proj);
3662 1834 : mtstate->mt_merge_action = action;
3663 :
3664 1834 : rslot = ExecInsert(context, mtstate->rootResultRelInfo,
3665 : newslot, canSetTag, NULL, NULL);
3666 1774 : mtstate->mt_merge_inserted += 1;
3667 1774 : break;
3668 0 : case CMD_NOTHING:
3669 : /* Do nothing */
3670 0 : break;
3671 0 : default:
3672 0 : elog(ERROR, "unknown action in MERGE WHEN NOT MATCHED clause");
3673 : }
3674 :
3675 : /*
3676 : * We've activated one of the WHEN clauses, so we don't search
3677 : * further. This is required behaviour, not an optimization.
3678 : */
3679 1774 : break;
3680 : }
3681 :
3682 2644 : return rslot;
3683 : }
3684 :
3685 : /*
3686 : * Initialize state for execution of MERGE.
3687 : */
3688 : void
3689 1616 : ExecInitMerge(ModifyTableState *mtstate, EState *estate)
3690 : {
3691 1616 : List *mergeActionLists = mtstate->mt_mergeActionLists;
3692 1616 : List *mergeJoinConditions = mtstate->mt_mergeJoinConditions;
3693 1616 : ResultRelInfo *rootRelInfo = mtstate->rootResultRelInfo;
3694 : ResultRelInfo *resultRelInfo;
3695 : ExprContext *econtext;
3696 : ListCell *lc;
3697 : int i;
3698 :
3699 1616 : if (mergeActionLists == NIL)
3700 0 : return;
3701 :
3702 1616 : mtstate->mt_merge_subcommands = 0;
3703 :
3704 1616 : if (mtstate->ps.ps_ExprContext == NULL)
3705 1328 : ExecAssignExprContext(estate, &mtstate->ps);
3706 1616 : econtext = mtstate->ps.ps_ExprContext;
3707 :
3708 : /*
3709 : * Create a MergeActionState for each action on the mergeActionList and
3710 : * add it to either a list of matched actions or not-matched actions.
3711 : *
3712 : * Similar logic appears in ExecInitPartitionInfo(), so if changing
3713 : * anything here, do so there too.
3714 : */
3715 1616 : i = 0;
3716 3474 : foreach(lc, mergeActionLists)
3717 : {
3718 1858 : List *mergeActionList = lfirst(lc);
3719 : Node *joinCondition;
3720 : TupleDesc relationDesc;
3721 : ListCell *l;
3722 :
3723 1858 : joinCondition = (Node *) list_nth(mergeJoinConditions, i);
3724 1858 : resultRelInfo = mtstate->resultRelInfo + i;
3725 1858 : i++;
3726 1858 : relationDesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
3727 :
3728 : /* initialize slots for MERGE fetches from this rel */
3729 1858 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
3730 1858 : ExecInitMergeTupleSlots(mtstate, resultRelInfo);
3731 :
3732 : /* initialize state for join condition checking */
3733 1858 : resultRelInfo->ri_MergeJoinCondition =
3734 1858 : ExecInitQual((List *) joinCondition, &mtstate->ps);
3735 :
3736 5122 : foreach(l, mergeActionList)
3737 : {
3738 3264 : MergeAction *action = (MergeAction *) lfirst(l);
3739 : MergeActionState *action_state;
3740 : TupleTableSlot *tgtslot;
3741 : TupleDesc tgtdesc;
3742 :
3743 : /*
3744 : * Build action merge state for this rel. (For partitions,
3745 : * equivalent code exists in ExecInitPartitionInfo.)
3746 : */
3747 3264 : action_state = makeNode(MergeActionState);
3748 3264 : action_state->mas_action = action;
3749 3264 : action_state->mas_whenqual = ExecInitQual((List *) action->qual,
3750 : &mtstate->ps);
3751 :
3752 : /*
3753 : * We create three lists - one for each MergeMatchKind - and stick
3754 : * the MergeActionState into the appropriate list.
3755 : */
3756 6528 : resultRelInfo->ri_MergeActions[action->matchKind] =
3757 3264 : lappend(resultRelInfo->ri_MergeActions[action->matchKind],
3758 : action_state);
3759 :
3760 3264 : switch (action->commandType)
3761 : {
3762 1080 : case CMD_INSERT:
3763 : /* INSERT actions always use rootRelInfo */
3764 1080 : ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
3765 : action->targetList);
3766 :
3767 : /*
3768 : * If the MERGE targets a partitioned table, any INSERT
3769 : * actions must be routed through it, not the child
3770 : * relations. Initialize the routing struct and the root
3771 : * table's "new" tuple slot for that, if not already done.
3772 : * The projection we prepare, for all relations, uses the
3773 : * root relation descriptor, and targets the plan's root
3774 : * slot. (This is consistent with the fact that we
3775 : * checked the plan output to match the root relation,
3776 : * above.)
3777 : */
3778 1080 : if (rootRelInfo->ri_RelationDesc->rd_rel->relkind ==
3779 : RELKIND_PARTITIONED_TABLE)
3780 : {
3781 336 : if (mtstate->mt_partition_tuple_routing == NULL)
3782 : {
3783 : /*
3784 : * Initialize planstate for routing if not already
3785 : * done.
3786 : *
3787 : * Note that the slot is managed as a standalone
3788 : * slot belonging to ModifyTableState, so we pass
3789 : * NULL for the 2nd argument.
3790 : */
3791 158 : mtstate->mt_root_tuple_slot =
3792 158 : table_slot_create(rootRelInfo->ri_RelationDesc,
3793 : NULL);
3794 158 : mtstate->mt_partition_tuple_routing =
3795 158 : ExecSetupPartitionTupleRouting(estate,
3796 : rootRelInfo->ri_RelationDesc);
3797 : }
3798 336 : tgtslot = mtstate->mt_root_tuple_slot;
3799 336 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
3800 : }
3801 : else
3802 : {
3803 : /*
3804 : * If the MERGE targets an inherited table, we insert
3805 : * into the root table, so we must initialize its
3806 : * "new" tuple slot, if not already done, and use its
3807 : * relation descriptor for the projection.
3808 : *
3809 : * For non-inherited tables, rootRelInfo and
3810 : * resultRelInfo are the same, and the "new" tuple
3811 : * slot will already have been initialized.
3812 : */
3813 744 : if (rootRelInfo->ri_newTupleSlot == NULL)
3814 36 : rootRelInfo->ri_newTupleSlot =
3815 36 : table_slot_create(rootRelInfo->ri_RelationDesc,
3816 : &estate->es_tupleTable);
3817 :
3818 744 : tgtslot = rootRelInfo->ri_newTupleSlot;
3819 744 : tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
3820 : }
3821 :
3822 1080 : action_state->mas_proj =
3823 1080 : ExecBuildProjectionInfo(action->targetList, econtext,
3824 : tgtslot,
3825 : &mtstate->ps,
3826 : tgtdesc);
3827 :
3828 1080 : mtstate->mt_merge_subcommands |= MERGE_INSERT;
3829 1080 : break;
3830 1638 : case CMD_UPDATE:
3831 1638 : action_state->mas_proj =
3832 1638 : ExecBuildUpdateProjection(action->targetList,
3833 : true,
3834 : action->updateColnos,
3835 : relationDesc,
3836 : econtext,
3837 : resultRelInfo->ri_newTupleSlot,
3838 : &mtstate->ps);
3839 1638 : mtstate->mt_merge_subcommands |= MERGE_UPDATE;
3840 1638 : break;
3841 470 : case CMD_DELETE:
3842 470 : mtstate->mt_merge_subcommands |= MERGE_DELETE;
3843 470 : break;
3844 76 : case CMD_NOTHING:
3845 76 : break;
3846 0 : default:
3847 0 : elog(ERROR, "unknown action in MERGE WHEN clause");
3848 : break;
3849 : }
3850 : }
3851 : }
3852 :
3853 : /*
3854 : * If the MERGE targets an inherited table, any INSERT actions will use
3855 : * rootRelInfo, and rootRelInfo will not be in the resultRelInfo array.
3856 : * Therefore we must initialize its WITH CHECK OPTION constraints and
3857 : * RETURNING projection, as ExecInitModifyTable did for the resultRelInfo
3858 : * entries.
3859 : *
3860 : * Note that the planner does not build a withCheckOptionList or
3861 : * returningList for the root relation, but as in ExecInitPartitionInfo,
3862 : * we can use the first resultRelInfo entry as a reference to calculate
3863 : * the attno's for the root table.
3864 : */
3865 1616 : if (rootRelInfo != mtstate->resultRelInfo &&
3866 248 : rootRelInfo->ri_RelationDesc->rd_rel->relkind != RELKIND_PARTITIONED_TABLE &&
3867 48 : (mtstate->mt_merge_subcommands & MERGE_INSERT) != 0)
3868 : {
3869 36 : ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
3870 36 : Relation rootRelation = rootRelInfo->ri_RelationDesc;
3871 36 : Relation firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
3872 36 : int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
3873 36 : AttrMap *part_attmap = NULL;
3874 : bool found_whole_row;
3875 :
3876 36 : if (node->withCheckOptionLists != NIL)
3877 : {
3878 : List *wcoList;
3879 18 : List *wcoExprs = NIL;
3880 :
3881 : /* There should be as many WCO lists as result rels */
3882 : Assert(list_length(node->withCheckOptionLists) ==
3883 : list_length(node->resultRelations));
3884 :
3885 : /*
3886 : * Use the first WCO list as a reference. In the most common case,
3887 : * this will be for the same relation as rootRelInfo, and so there
3888 : * will be no need to adjust its attno's.
3889 : */
3890 18 : wcoList = linitial(node->withCheckOptionLists);
3891 18 : if (rootRelation != firstResultRel)
3892 : {
3893 : /* Convert any Vars in it to contain the root's attno's */
3894 : part_attmap =
3895 18 : build_attrmap_by_name(RelationGetDescr(rootRelation),
3896 : RelationGetDescr(firstResultRel),
3897 : false);
3898 :
3899 : wcoList = (List *)
3900 18 : map_variable_attnos((Node *) wcoList,
3901 : firstVarno, 0,
3902 : part_attmap,
3903 18 : RelationGetForm(rootRelation)->reltype,
3904 : &found_whole_row);
3905 : }
3906 :
3907 90 : foreach(lc, wcoList)
3908 : {
3909 72 : WithCheckOption *wco = lfirst_node(WithCheckOption, lc);
3910 72 : ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual),
3911 : &mtstate->ps);
3912 :
3913 72 : wcoExprs = lappend(wcoExprs, wcoExpr);
3914 : }
3915 :
3916 18 : rootRelInfo->ri_WithCheckOptions = wcoList;
3917 18 : rootRelInfo->ri_WithCheckOptionExprs = wcoExprs;
3918 : }
3919 :
3920 36 : if (node->returningLists != NIL)
3921 : {
3922 : List *returningList;
3923 :
3924 : /* There should be as many returning lists as result rels */
3925 : Assert(list_length(node->returningLists) ==
3926 : list_length(node->resultRelations));
3927 :
3928 : /*
3929 : * Use the first returning list as a reference. In the most common
3930 : * case, this will be for the same relation as rootRelInfo, and so
3931 : * there will be no need to adjust its attno's.
3932 : */
3933 6 : returningList = linitial(node->returningLists);
3934 6 : if (rootRelation != firstResultRel)
3935 : {
3936 : /* Convert any Vars in it to contain the root's attno's */
3937 6 : if (part_attmap == NULL)
3938 : part_attmap =
3939 0 : build_attrmap_by_name(RelationGetDescr(rootRelation),
3940 : RelationGetDescr(firstResultRel),
3941 : false);
3942 :
3943 : returningList = (List *)
3944 6 : map_variable_attnos((Node *) returningList,
3945 : firstVarno, 0,
3946 : part_attmap,
3947 6 : RelationGetForm(rootRelation)->reltype,
3948 : &found_whole_row);
3949 : }
3950 6 : rootRelInfo->ri_returningList = returningList;
3951 :
3952 : /* Initialize the RETURNING projection */
3953 6 : rootRelInfo->ri_projectReturning =
3954 6 : ExecBuildProjectionInfo(returningList, econtext,
3955 : mtstate->ps.ps_ResultTupleSlot,
3956 : &mtstate->ps,
3957 : RelationGetDescr(rootRelation));
3958 : }
3959 : }
3960 : }
3961 :
3962 : /*
3963 : * Initializes the tuple slots in a ResultRelInfo for any MERGE action.
3964 : *
3965 : * We mark 'projectNewInfoValid' even though the projections themselves
3966 : * are not initialized here.
3967 : */
3968 : void
3969 1882 : ExecInitMergeTupleSlots(ModifyTableState *mtstate,
3970 : ResultRelInfo *resultRelInfo)
3971 : {
3972 1882 : EState *estate = mtstate->ps.state;
3973 :
3974 : Assert(!resultRelInfo->ri_projectNewInfoValid);
3975 :
3976 1882 : resultRelInfo->ri_oldTupleSlot =
3977 1882 : table_slot_create(resultRelInfo->ri_RelationDesc,
3978 : &estate->es_tupleTable);
3979 1882 : resultRelInfo->ri_newTupleSlot =
3980 1882 : table_slot_create(resultRelInfo->ri_RelationDesc,
3981 : &estate->es_tupleTable);
3982 1882 : resultRelInfo->ri_projectNewInfoValid = true;
3983 1882 : }
3984 :
3985 : /*
3986 : * Process BEFORE EACH STATEMENT triggers
3987 : */
3988 : static void
3989 119682 : fireBSTriggers(ModifyTableState *node)
3990 : {
3991 119682 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
3992 119682 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
3993 :
3994 119682 : switch (node->operation)
3995 : {
3996 92224 : case CMD_INSERT:
3997 92224 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
3998 92212 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
3999 862 : ExecBSUpdateTriggers(node->ps.state,
4000 : resultRelInfo);
4001 92212 : break;
4002 13832 : case CMD_UPDATE:
4003 13832 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
4004 13832 : break;
4005 12164 : case CMD_DELETE:
4006 12164 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
4007 12164 : break;
4008 1462 : case CMD_MERGE:
4009 1462 : if (node->mt_merge_subcommands & MERGE_INSERT)
4010 800 : ExecBSInsertTriggers(node->ps.state, resultRelInfo);
4011 1462 : if (node->mt_merge_subcommands & MERGE_UPDATE)
4012 978 : ExecBSUpdateTriggers(node->ps.state, resultRelInfo);
4013 1462 : if (node->mt_merge_subcommands & MERGE_DELETE)
4014 386 : ExecBSDeleteTriggers(node->ps.state, resultRelInfo);
4015 1462 : break;
4016 0 : default:
4017 0 : elog(ERROR, "unknown operation");
4018 : break;
4019 : }
4020 119670 : }
4021 :
4022 : /*
4023 : * Process AFTER EACH STATEMENT triggers
4024 : */
4025 : static void
4026 116370 : fireASTriggers(ModifyTableState *node)
4027 : {
4028 116370 : ModifyTable *plan = (ModifyTable *) node->ps.plan;
4029 116370 : ResultRelInfo *resultRelInfo = node->rootResultRelInfo;
4030 :
4031 116370 : switch (node->operation)
4032 : {
4033 89888 : case CMD_INSERT:
4034 89888 : if (plan->onConflictAction == ONCONFLICT_UPDATE)
4035 754 : ExecASUpdateTriggers(node->ps.state,
4036 : resultRelInfo,
4037 754 : node->mt_oc_transition_capture);
4038 89888 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
4039 89888 : node->mt_transition_capture);
4040 89888 : break;
4041 13120 : case CMD_UPDATE:
4042 13120 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
4043 13120 : node->mt_transition_capture);
4044 13120 : break;
4045 12054 : case CMD_DELETE:
4046 12054 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
4047 12054 : node->mt_transition_capture);
4048 12054 : break;
4049 1308 : case CMD_MERGE:
4050 1308 : if (node->mt_merge_subcommands & MERGE_DELETE)
4051 350 : ExecASDeleteTriggers(node->ps.state, resultRelInfo,
4052 350 : node->mt_transition_capture);
4053 1308 : if (node->mt_merge_subcommands & MERGE_UPDATE)
4054 878 : ExecASUpdateTriggers(node->ps.state, resultRelInfo,
4055 878 : node->mt_transition_capture);
4056 1308 : if (node->mt_merge_subcommands & MERGE_INSERT)
4057 732 : ExecASInsertTriggers(node->ps.state, resultRelInfo,
4058 732 : node->mt_transition_capture);
4059 1308 : break;
4060 0 : default:
4061 0 : elog(ERROR, "unknown operation");
4062 : break;
4063 : }
4064 116370 : }
4065 :
4066 : /*
4067 : * Set up the state needed for collecting transition tuples for AFTER
4068 : * triggers.
4069 : */
4070 : static void
4071 120046 : ExecSetupTransitionCaptureState(ModifyTableState *mtstate, EState *estate)
4072 : {
4073 120046 : ModifyTable *plan = (ModifyTable *) mtstate->ps.plan;
4074 120046 : ResultRelInfo *targetRelInfo = mtstate->rootResultRelInfo;
4075 :
4076 : /* Check for transition tables on the directly targeted relation. */
4077 120046 : mtstate->mt_transition_capture =
4078 120046 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
4079 120046 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
4080 : mtstate->operation);
4081 120046 : if (plan->operation == CMD_INSERT &&
4082 92232 : plan->onConflictAction == ONCONFLICT_UPDATE)
4083 868 : mtstate->mt_oc_transition_capture =
4084 868 : MakeTransitionCaptureState(targetRelInfo->ri_TrigDesc,
4085 868 : RelationGetRelid(targetRelInfo->ri_RelationDesc),
4086 : CMD_UPDATE);
4087 120046 : }
4088 :
4089 : /*
4090 : * ExecPrepareTupleRouting --- prepare for routing one tuple
4091 : *
4092 : * Determine the partition in which the tuple in slot is to be inserted,
4093 : * and return its ResultRelInfo in *partRelInfo. The return value is
4094 : * a slot holding the tuple of the partition rowtype.
4095 : *
4096 : * This also sets the transition table information in mtstate based on the
4097 : * selected partition.
4098 : */
4099 : static TupleTableSlot *
4100 758696 : ExecPrepareTupleRouting(ModifyTableState *mtstate,
4101 : EState *estate,
4102 : PartitionTupleRouting *proute,
4103 : ResultRelInfo *targetRelInfo,
4104 : TupleTableSlot *slot,
4105 : ResultRelInfo **partRelInfo)
4106 : {
4107 : ResultRelInfo *partrel;
4108 : TupleConversionMap *map;
4109 :
4110 : /*
4111 : * Lookup the target partition's ResultRelInfo. If ExecFindPartition does
4112 : * not find a valid partition for the tuple in 'slot' then an error is
4113 : * raised. An error may also be raised if the found partition is not a
4114 : * valid target for INSERTs. This is required since a partitioned table
4115 : * UPDATE to another partition becomes a DELETE+INSERT.
4116 : */
4117 758696 : partrel = ExecFindPartition(mtstate, targetRelInfo, proute, slot, estate);
4118 :
4119 : /*
4120 : * If we're capturing transition tuples, we might need to convert from the
4121 : * partition rowtype to root partitioned table's rowtype. But if there
4122 : * are no BEFORE triggers on the partition that could change the tuple, we
4123 : * can just remember the original unconverted tuple to avoid a needless
4124 : * round trip conversion.
4125 : */
4126 758474 : if (mtstate->mt_transition_capture != NULL)
4127 : {
4128 : bool has_before_insert_row_trig;
4129 :
4130 196 : has_before_insert_row_trig = (partrel->ri_TrigDesc &&
4131 42 : partrel->ri_TrigDesc->trig_insert_before_row);
4132 :
4133 154 : mtstate->mt_transition_capture->tcs_original_insert_tuple =
4134 154 : !has_before_insert_row_trig ? slot : NULL;
4135 : }
4136 :
4137 : /*
4138 : * Convert the tuple, if necessary.
4139 : */
4140 758474 : map = ExecGetRootToChildMap(partrel, estate);
4141 758474 : if (map != NULL)
4142 : {
4143 68498 : TupleTableSlot *new_slot = partrel->ri_PartitionTupleSlot;
4144 :
4145 68498 : slot = execute_attr_map_slot(map->attrMap, slot, new_slot);
4146 : }
4147 :
4148 758474 : *partRelInfo = partrel;
4149 758474 : return slot;
4150 : }
4151 :
4152 : /* ----------------------------------------------------------------
4153 : * ExecModifyTable
4154 : *
4155 : * Perform table modifications as required, and return RETURNING results
4156 : * if needed.
4157 : * ----------------------------------------------------------------
4158 : */
4159 : static TupleTableSlot *
4160 128576 : ExecModifyTable(PlanState *pstate)
4161 : {
4162 128576 : ModifyTableState *node = castNode(ModifyTableState, pstate);
4163 : ModifyTableContext context;
4164 128576 : EState *estate = node->ps.state;
4165 128576 : CmdType operation = node->operation;
4166 : ResultRelInfo *resultRelInfo;
4167 : PlanState *subplanstate;
4168 : TupleTableSlot *slot;
4169 : TupleTableSlot *oldSlot;
4170 : ItemPointerData tuple_ctid;
4171 : HeapTupleData oldtupdata;
4172 : HeapTuple oldtuple;
4173 : ItemPointer tupleid;
4174 : bool tuplock;
4175 :
4176 128576 : CHECK_FOR_INTERRUPTS();
4177 :
4178 : /*
4179 : * This should NOT get called during EvalPlanQual; we should have passed a
4180 : * subplan tree to EvalPlanQual, instead. Use a runtime test not just
4181 : * Assert because this condition is easy to miss in testing. (Note:
4182 : * although ModifyTable should not get executed within an EvalPlanQual
4183 : * operation, we do have to allow it to be initialized and shut down in
4184 : * case it is within a CTE subplan. Hence this test must be here, not in
4185 : * ExecInitModifyTable.)
4186 : */
4187 128576 : if (estate->es_epq_active != NULL)
4188 0 : elog(ERROR, "ModifyTable should not be called during EvalPlanQual");
4189 :
4190 : /*
4191 : * If we've already completed processing, don't try to do more. We need
4192 : * this test because ExecPostprocessPlan might call us an extra time, and
4193 : * our subplan's nodes aren't necessarily robust against being called
4194 : * extra times.
4195 : */
4196 128576 : if (node->mt_done)
4197 798 : return NULL;
4198 :
4199 : /*
4200 : * On first call, fire BEFORE STATEMENT triggers before proceeding.
4201 : */
4202 127778 : if (node->fireBSTriggers)
4203 : {
4204 119682 : fireBSTriggers(node);
4205 119670 : node->fireBSTriggers = false;
4206 : }
4207 :
4208 : /* Preload local variables */
4209 127766 : resultRelInfo = node->resultRelInfo + node->mt_lastResultIndex;
4210 127766 : subplanstate = outerPlanState(node);
4211 :
4212 : /* Set global context */
4213 127766 : context.mtstate = node;
4214 127766 : context.epqstate = &node->mt_epqstate;
4215 127766 : context.estate = estate;
4216 :
4217 : /*
4218 : * Fetch rows from subplan, and execute the required table modification
4219 : * for each row.
4220 : */
4221 : for (;;)
4222 : {
4223 : /*
4224 : * Reset the per-output-tuple exprcontext. This is needed because
4225 : * triggers expect to use that context as workspace. It's a bit ugly
4226 : * to do this below the top level of the plan, however. We might need
4227 : * to rethink this later.
4228 : */
4229 14367836 : ResetPerTupleExprContext(estate);
4230 :
4231 : /*
4232 : * Reset per-tuple memory context used for processing on conflict and
4233 : * returning clauses, to free any expression evaluation storage
4234 : * allocated in the previous cycle.
4235 : */
4236 14367836 : if (pstate->ps_ExprContext)
4237 352404 : ResetExprContext(pstate->ps_ExprContext);
4238 :
4239 : /*
4240 : * If there is a pending MERGE ... WHEN NOT MATCHED [BY TARGET] action
4241 : * to execute, do so now --- see the comments in ExecMerge().
4242 : */
4243 14367836 : if (node->mt_merge_pending_not_matched != NULL)
4244 : {
4245 4 : context.planSlot = node->mt_merge_pending_not_matched;
4246 4 : context.cpDeletedSlot = NULL;
4247 :
4248 4 : slot = ExecMergeNotMatched(&context, node->resultRelInfo,
4249 4 : node->canSetTag);
4250 :
4251 : /* Clear the pending action */
4252 4 : node->mt_merge_pending_not_matched = NULL;
4253 :
4254 : /*
4255 : * If we got a RETURNING result, return it to the caller. We'll
4256 : * continue the work on next call.
4257 : */
4258 4 : if (slot)
4259 4 : return slot;
4260 :
4261 0 : continue; /* continue with the next tuple */
4262 : }
4263 :
4264 : /* Fetch the next row from subplan */
4265 14367832 : context.planSlot = ExecProcNode(subplanstate);
4266 14367414 : context.cpDeletedSlot = NULL;
4267 :
4268 : /* No more tuples to process? */
4269 14367414 : if (TupIsNull(context.planSlot))
4270 : break;
4271 :
4272 : /*
4273 : * When there are multiple result relations, each tuple contains a
4274 : * junk column that gives the OID of the rel from which it came.
4275 : * Extract it and select the correct result relation.
4276 : */
4277 14251042 : if (AttributeNumberIsValid(node->mt_resultOidAttno))
4278 : {
4279 : Datum datum;
4280 : bool isNull;
4281 : Oid resultoid;
4282 :
4283 5204 : datum = ExecGetJunkAttribute(context.planSlot, node->mt_resultOidAttno,
4284 : &isNull);
4285 5204 : if (isNull)
4286 : {
4287 : /*
4288 : * For commands other than MERGE, any tuples having InvalidOid
4289 : * for tableoid are errors. For MERGE, we may need to handle
4290 : * them as WHEN NOT MATCHED clauses if any, so do that.
4291 : *
4292 : * Note that we use the node's toplevel resultRelInfo, not any
4293 : * specific partition's.
4294 : */
4295 508 : if (operation == CMD_MERGE)
4296 : {
4297 508 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4298 :
4299 508 : slot = ExecMerge(&context, node->resultRelInfo,
4300 508 : NULL, NULL, node->canSetTag);
4301 :
4302 : /*
4303 : * If we got a RETURNING result, return it to the caller.
4304 : * We'll continue the work on next call.
4305 : */
4306 496 : if (slot)
4307 38 : return slot;
4308 :
4309 458 : continue; /* continue with the next tuple */
4310 : }
4311 :
4312 0 : elog(ERROR, "tableoid is NULL");
4313 : }
4314 4696 : resultoid = DatumGetObjectId(datum);
4315 :
4316 : /* If it's not the same as last time, we need to locate the rel */
4317 4696 : if (resultoid != node->mt_lastResultOid)
4318 3222 : resultRelInfo = ExecLookupResultRelByOid(node, resultoid,
4319 : false, true);
4320 : }
4321 :
4322 : /*
4323 : * If resultRelInfo->ri_usesFdwDirectModify is true, all we need to do
4324 : * here is compute the RETURNING expressions.
4325 : */
4326 14250534 : if (resultRelInfo->ri_usesFdwDirectModify)
4327 : {
4328 : Assert(resultRelInfo->ri_projectReturning);
4329 :
4330 : /*
4331 : * A scan slot containing the data that was actually inserted,
4332 : * updated or deleted has already been made available to
4333 : * ExecProcessReturning by IterateDirectModify, so no need to
4334 : * provide it here. The individual old and new slots are not
4335 : * needed, since direct-modify is disabled if the RETURNING list
4336 : * refers to OLD/NEW values.
4337 : */
4338 : Assert((resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_OLD) == 0 &&
4339 : (resultRelInfo->ri_projectReturning->pi_state.flags & EEO_FLAG_HAS_NEW) == 0);
4340 :
4341 694 : slot = ExecProcessReturning(&context, resultRelInfo, operation,
4342 : NULL, NULL, context.planSlot);
4343 :
4344 694 : return slot;
4345 : }
4346 :
4347 14249840 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4348 14249840 : slot = context.planSlot;
4349 :
4350 14249840 : tupleid = NULL;
4351 14249840 : oldtuple = NULL;
4352 :
4353 : /*
4354 : * For UPDATE/DELETE/MERGE, fetch the row identity info for the tuple
4355 : * to be updated/deleted/merged. For a heap relation, that's a TID;
4356 : * otherwise we may have a wholerow junk attr that carries the old
4357 : * tuple in toto. Keep this in step with the part of
4358 : * ExecInitModifyTable that sets up ri_RowIdAttNo.
4359 : */
4360 14249840 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4361 : operation == CMD_MERGE)
4362 : {
4363 : char relkind;
4364 : Datum datum;
4365 : bool isNull;
4366 :
4367 1982236 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4368 1982236 : if (relkind == RELKIND_RELATION ||
4369 570 : relkind == RELKIND_MATVIEW ||
4370 : relkind == RELKIND_PARTITIONED_TABLE)
4371 : {
4372 : /* ri_RowIdAttNo refers to a ctid attribute */
4373 : Assert(AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo));
4374 1981672 : datum = ExecGetJunkAttribute(slot,
4375 1981672 : resultRelInfo->ri_RowIdAttNo,
4376 : &isNull);
4377 :
4378 : /*
4379 : * For commands other than MERGE, any tuples having a null row
4380 : * identifier are errors. For MERGE, we may need to handle
4381 : * them as WHEN NOT MATCHED clauses if any, so do that.
4382 : *
4383 : * Note that we use the node's toplevel resultRelInfo, not any
4384 : * specific partition's.
4385 : */
4386 1981672 : if (isNull)
4387 : {
4388 2130 : if (operation == CMD_MERGE)
4389 : {
4390 2130 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4391 :
4392 2130 : slot = ExecMerge(&context, node->resultRelInfo,
4393 2130 : NULL, NULL, node->canSetTag);
4394 :
4395 : /*
4396 : * If we got a RETURNING result, return it to the
4397 : * caller. We'll continue the work on next call.
4398 : */
4399 2088 : if (slot)
4400 128 : return slot;
4401 :
4402 2002 : continue; /* continue with the next tuple */
4403 : }
4404 :
4405 0 : elog(ERROR, "ctid is NULL");
4406 : }
4407 :
4408 1979542 : tupleid = (ItemPointer) DatumGetPointer(datum);
4409 1979542 : tuple_ctid = *tupleid; /* be sure we don't free ctid!! */
4410 1979542 : tupleid = &tuple_ctid;
4411 : }
4412 :
4413 : /*
4414 : * Use the wholerow attribute, when available, to reconstruct the
4415 : * old relation tuple. The old tuple serves one or both of two
4416 : * purposes: 1) it serves as the OLD tuple for row triggers, 2) it
4417 : * provides values for any unchanged columns for the NEW tuple of
4418 : * an UPDATE, because the subplan does not produce all the columns
4419 : * of the target table.
4420 : *
4421 : * Note that the wholerow attribute does not carry system columns,
4422 : * so foreign table triggers miss seeing those, except that we
4423 : * know enough here to set t_tableOid. Quite separately from
4424 : * this, the FDW may fetch its own junk attrs to identify the row.
4425 : *
4426 : * Other relevant relkinds, currently limited to views, always
4427 : * have a wholerow attribute.
4428 : */
4429 564 : else if (AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4430 : {
4431 534 : datum = ExecGetJunkAttribute(slot,
4432 534 : resultRelInfo->ri_RowIdAttNo,
4433 : &isNull);
4434 :
4435 : /*
4436 : * For commands other than MERGE, any tuples having a null row
4437 : * identifier are errors. For MERGE, we may need to handle
4438 : * them as WHEN NOT MATCHED clauses if any, so do that.
4439 : *
4440 : * Note that we use the node's toplevel resultRelInfo, not any
4441 : * specific partition's.
4442 : */
4443 534 : if (isNull)
4444 : {
4445 48 : if (operation == CMD_MERGE)
4446 : {
4447 48 : EvalPlanQualSetSlot(&node->mt_epqstate, context.planSlot);
4448 :
4449 48 : slot = ExecMerge(&context, node->resultRelInfo,
4450 48 : NULL, NULL, node->canSetTag);
4451 :
4452 : /*
4453 : * If we got a RETURNING result, return it to the
4454 : * caller. We'll continue the work on next call.
4455 : */
4456 42 : if (slot)
4457 12 : return slot;
4458 :
4459 30 : continue; /* continue with the next tuple */
4460 : }
4461 :
4462 0 : elog(ERROR, "wholerow is NULL");
4463 : }
4464 :
4465 486 : oldtupdata.t_data = DatumGetHeapTupleHeader(datum);
4466 486 : oldtupdata.t_len =
4467 486 : HeapTupleHeaderGetDatumLength(oldtupdata.t_data);
4468 486 : ItemPointerSetInvalid(&(oldtupdata.t_self));
4469 : /* Historically, view triggers see invalid t_tableOid. */
4470 486 : oldtupdata.t_tableOid =
4471 486 : (relkind == RELKIND_VIEW) ? InvalidOid :
4472 210 : RelationGetRelid(resultRelInfo->ri_RelationDesc);
4473 :
4474 486 : oldtuple = &oldtupdata;
4475 : }
4476 : else
4477 : {
4478 : /* Only foreign tables are allowed to omit a row-ID attr */
4479 : Assert(relkind == RELKIND_FOREIGN_TABLE);
4480 : }
4481 : }
4482 :
4483 14247662 : switch (operation)
4484 : {
4485 12267604 : case CMD_INSERT:
4486 : /* Initialize projection info if first time for this table */
4487 12267604 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4488 91040 : ExecInitInsertProjection(node, resultRelInfo);
4489 12267604 : slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
4490 12267604 : slot = ExecInsert(&context, resultRelInfo, slot,
4491 12267604 : node->canSetTag, NULL, NULL);
4492 12265462 : break;
4493 :
4494 317888 : case CMD_UPDATE:
4495 317888 : tuplock = false;
4496 :
4497 : /* Initialize projection info if first time for this table */
4498 317888 : if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
4499 13530 : ExecInitUpdateProjection(node, resultRelInfo);
4500 :
4501 : /*
4502 : * Make the new tuple by combining plan's output tuple with
4503 : * the old tuple being updated.
4504 : */
4505 317888 : oldSlot = resultRelInfo->ri_oldTupleSlot;
4506 317888 : if (oldtuple != NULL)
4507 : {
4508 : Assert(!resultRelInfo->ri_needLockTagTuple);
4509 : /* Use the wholerow junk attr as the old tuple. */
4510 318 : ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
4511 : }
4512 : else
4513 : {
4514 : /* Fetch the most recent version of old tuple. */
4515 317570 : Relation relation = resultRelInfo->ri_RelationDesc;
4516 :
4517 317570 : if (resultRelInfo->ri_needLockTagTuple)
4518 : {
4519 25206 : LockTuple(relation, tupleid, InplaceUpdateTupleLock);
4520 25206 : tuplock = true;
4521 : }
4522 317570 : if (!table_tuple_fetch_row_version(relation, tupleid,
4523 : SnapshotAny,
4524 : oldSlot))
4525 0 : elog(ERROR, "failed to fetch tuple being updated");
4526 : }
4527 317888 : slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
4528 : oldSlot);
4529 :
4530 : /* Now apply the update. */
4531 317888 : slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
4532 317888 : oldSlot, slot, node->canSetTag);
4533 317384 : if (tuplock)
4534 25206 : UnlockTuple(resultRelInfo->ri_RelationDesc, tupleid,
4535 : InplaceUpdateTupleLock);
4536 317384 : break;
4537 :
4538 1649906 : case CMD_DELETE:
4539 1649906 : slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
4540 1649906 : true, false, node->canSetTag, NULL, NULL, NULL);
4541 1649838 : break;
4542 :
4543 12264 : case CMD_MERGE:
4544 12264 : slot = ExecMerge(&context, resultRelInfo, tupleid, oldtuple,
4545 12264 : node->canSetTag);
4546 12170 : break;
4547 :
4548 0 : default:
4549 0 : elog(ERROR, "unknown operation");
4550 : break;
4551 : }
4552 :
4553 : /*
4554 : * If we got a RETURNING result, return it to caller. We'll continue
4555 : * the work on next call.
4556 : */
4557 14244854 : if (slot)
4558 7244 : return slot;
4559 : }
4560 :
4561 : /*
4562 : * Insert remaining tuples for batch insert.
4563 : */
4564 116372 : if (estate->es_insert_pending_result_relations != NIL)
4565 26 : ExecPendingInserts(estate);
4566 :
4567 : /*
4568 : * We're done, but fire AFTER STATEMENT triggers before exiting.
4569 : */
4570 116370 : fireASTriggers(node);
4571 :
4572 116370 : node->mt_done = true;
4573 :
4574 116370 : return NULL;
4575 : }
4576 :
4577 : /*
4578 : * ExecLookupResultRelByOid
4579 : * If the table with given OID is among the result relations to be
4580 : * updated by the given ModifyTable node, return its ResultRelInfo.
4581 : *
4582 : * If not found, return NULL if missing_ok, else raise error.
4583 : *
4584 : * If update_cache is true, then upon successful lookup, update the node's
4585 : * one-element cache. ONLY ExecModifyTable may pass true for this.
4586 : */
4587 : ResultRelInfo *
4588 11848 : ExecLookupResultRelByOid(ModifyTableState *node, Oid resultoid,
4589 : bool missing_ok, bool update_cache)
4590 : {
4591 11848 : if (node->mt_resultOidHash)
4592 : {
4593 : /* Use the pre-built hash table to locate the rel */
4594 : MTTargetRelLookup *mtlookup;
4595 :
4596 : mtlookup = (MTTargetRelLookup *)
4597 0 : hash_search(node->mt_resultOidHash, &resultoid, HASH_FIND, NULL);
4598 0 : if (mtlookup)
4599 : {
4600 0 : if (update_cache)
4601 : {
4602 0 : node->mt_lastResultOid = resultoid;
4603 0 : node->mt_lastResultIndex = mtlookup->relationIndex;
4604 : }
4605 0 : return node->resultRelInfo + mtlookup->relationIndex;
4606 : }
4607 : }
4608 : else
4609 : {
4610 : /* With few target rels, just search the ResultRelInfo array */
4611 22500 : for (int ndx = 0; ndx < node->mt_nrels; ndx++)
4612 : {
4613 14382 : ResultRelInfo *rInfo = node->resultRelInfo + ndx;
4614 :
4615 14382 : if (RelationGetRelid(rInfo->ri_RelationDesc) == resultoid)
4616 : {
4617 3730 : if (update_cache)
4618 : {
4619 3222 : node->mt_lastResultOid = resultoid;
4620 3222 : node->mt_lastResultIndex = ndx;
4621 : }
4622 3730 : return rInfo;
4623 : }
4624 : }
4625 : }
4626 :
4627 8118 : if (!missing_ok)
4628 0 : elog(ERROR, "incorrect result relation OID %u", resultoid);
4629 8118 : return NULL;
4630 : }
4631 :
4632 : /* ----------------------------------------------------------------
4633 : * ExecInitModifyTable
4634 : * ----------------------------------------------------------------
4635 : */
4636 : ModifyTableState *
4637 121080 : ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
4638 : {
4639 : ModifyTableState *mtstate;
4640 121080 : Plan *subplan = outerPlan(node);
4641 121080 : CmdType operation = node->operation;
4642 121080 : int total_nrels = list_length(node->resultRelations);
4643 : int nrels;
4644 121080 : List *resultRelations = NIL;
4645 121080 : List *withCheckOptionLists = NIL;
4646 121080 : List *returningLists = NIL;
4647 121080 : List *updateColnosLists = NIL;
4648 121080 : List *mergeActionLists = NIL;
4649 121080 : List *mergeJoinConditions = NIL;
4650 : ResultRelInfo *resultRelInfo;
4651 : List *arowmarks;
4652 : ListCell *l;
4653 : int i;
4654 : Relation rel;
4655 :
4656 : /* check for unsupported flags */
4657 : Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
4658 :
4659 : /*
4660 : * Only consider unpruned relations for initializing their ResultRelInfo
4661 : * struct and other fields such as withCheckOptions, etc.
4662 : *
4663 : * Note: We must avoid pruning every result relation. This is important
4664 : * for MERGE, since even if every result relation is pruned from the
4665 : * subplan, there might still be NOT MATCHED rows, for which there may be
4666 : * INSERT actions to perform. To allow these actions to be found, at
4667 : * least one result relation must be kept. Also, when inserting into a
4668 : * partitioned table, ExecInitPartitionInfo() needs a ResultRelInfo struct
4669 : * as a reference for building the ResultRelInfo of the target partition.
4670 : * In either case, it doesn't matter which result relation is kept, so we
4671 : * just keep the first one, if all others have been pruned. See also,
4672 : * ExecDoInitialPruning(), which ensures that this first result relation
4673 : * has been locked.
4674 : */
4675 121080 : i = 0;
4676 244692 : foreach(l, node->resultRelations)
4677 : {
4678 123612 : Index rti = lfirst_int(l);
4679 : bool keep_rel;
4680 :
4681 123612 : keep_rel = bms_is_member(rti, estate->es_unpruned_relids);
4682 123612 : if (!keep_rel && i == total_nrels - 1 && resultRelations == NIL)
4683 : {
4684 : /* all result relations pruned; keep the first one */
4685 48 : keep_rel = true;
4686 48 : rti = linitial_int(node->resultRelations);
4687 48 : i = 0;
4688 : }
4689 :
4690 123612 : if (keep_rel)
4691 : {
4692 123526 : resultRelations = lappend_int(resultRelations, rti);
4693 123526 : if (node->withCheckOptionLists)
4694 : {
4695 1544 : List *withCheckOptions = list_nth_node(List,
4696 : node->withCheckOptionLists,
4697 : i);
4698 :
4699 1544 : withCheckOptionLists = lappend(withCheckOptionLists, withCheckOptions);
4700 : }
4701 123526 : if (node->returningLists)
4702 : {
4703 5084 : List *returningList = list_nth_node(List,
4704 : node->returningLists,
4705 : i);
4706 :
4707 5084 : returningLists = lappend(returningLists, returningList);
4708 : }
4709 123526 : if (node->updateColnosLists)
4710 : {
4711 16264 : List *updateColnosList = list_nth(node->updateColnosLists, i);
4712 :
4713 16264 : updateColnosLists = lappend(updateColnosLists, updateColnosList);
4714 : }
4715 123526 : if (node->mergeActionLists)
4716 : {
4717 1870 : List *mergeActionList = list_nth(node->mergeActionLists, i);
4718 :
4719 1870 : mergeActionLists = lappend(mergeActionLists, mergeActionList);
4720 : }
4721 123526 : if (node->mergeJoinConditions)
4722 : {
4723 1870 : List *mergeJoinCondition = list_nth(node->mergeJoinConditions, i);
4724 :
4725 1870 : mergeJoinConditions = lappend(mergeJoinConditions, mergeJoinCondition);
4726 : }
4727 : }
4728 123612 : i++;
4729 : }
4730 121080 : nrels = list_length(resultRelations);
4731 : Assert(nrels > 0);
4732 :
4733 : /*
4734 : * create state structure
4735 : */
4736 121080 : mtstate = makeNode(ModifyTableState);
4737 121080 : mtstate->ps.plan = (Plan *) node;
4738 121080 : mtstate->ps.state = estate;
4739 121080 : mtstate->ps.ExecProcNode = ExecModifyTable;
4740 :
4741 121080 : mtstate->operation = operation;
4742 121080 : mtstate->canSetTag = node->canSetTag;
4743 121080 : mtstate->mt_done = false;
4744 :
4745 121080 : mtstate->mt_nrels = nrels;
4746 121080 : mtstate->resultRelInfo = (ResultRelInfo *)
4747 121080 : palloc(nrels * sizeof(ResultRelInfo));
4748 :
4749 121080 : mtstate->mt_merge_pending_not_matched = NULL;
4750 121080 : mtstate->mt_merge_inserted = 0;
4751 121080 : mtstate->mt_merge_updated = 0;
4752 121080 : mtstate->mt_merge_deleted = 0;
4753 121080 : mtstate->mt_updateColnosLists = updateColnosLists;
4754 121080 : mtstate->mt_mergeActionLists = mergeActionLists;
4755 121080 : mtstate->mt_mergeJoinConditions = mergeJoinConditions;
4756 :
4757 : /*----------
4758 : * Resolve the target relation. This is the same as:
4759 : *
4760 : * - the relation for which we will fire FOR STATEMENT triggers,
4761 : * - the relation into whose tuple format all captured transition tuples
4762 : * must be converted, and
4763 : * - the root partitioned table used for tuple routing.
4764 : *
4765 : * If it's a partitioned or inherited table, the root partition or
4766 : * appendrel RTE doesn't appear elsewhere in the plan and its RT index is
4767 : * given explicitly in node->rootRelation. Otherwise, the target relation
4768 : * is the sole relation in the node->resultRelations list and, since it can
4769 : * never be pruned, also in the resultRelations list constructed above.
4770 : *----------
4771 : */
4772 121080 : if (node->rootRelation > 0)
4773 : {
4774 : Assert(bms_is_member(node->rootRelation, estate->es_unpruned_relids));
4775 2908 : mtstate->rootResultRelInfo = makeNode(ResultRelInfo);
4776 2908 : ExecInitResultRelation(estate, mtstate->rootResultRelInfo,
4777 : node->rootRelation);
4778 : }
4779 : else
4780 : {
4781 : Assert(list_length(node->resultRelations) == 1);
4782 : Assert(list_length(resultRelations) == 1);
4783 118172 : mtstate->rootResultRelInfo = mtstate->resultRelInfo;
4784 118172 : ExecInitResultRelation(estate, mtstate->resultRelInfo,
4785 118172 : linitial_int(resultRelations));
4786 : }
4787 :
4788 : /* set up epqstate with dummy subplan data for the moment */
4789 121080 : EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
4790 : node->epqParam, resultRelations);
4791 121080 : mtstate->fireBSTriggers = true;
4792 :
4793 : /*
4794 : * Build state for collecting transition tuples. This requires having a
4795 : * valid trigger query context, so skip it in explain-only mode.
4796 : */
4797 121080 : if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
4798 120046 : ExecSetupTransitionCaptureState(mtstate, estate);
4799 :
4800 : /*
4801 : * Open all the result relations and initialize the ResultRelInfo structs.
4802 : * (But root relation was initialized above, if it's part of the array.)
4803 : * We must do this before initializing the subplan, because direct-modify
4804 : * FDWs expect their ResultRelInfos to be available.
4805 : */
4806 121080 : resultRelInfo = mtstate->resultRelInfo;
4807 121080 : i = 0;
4808 244264 : foreach(l, resultRelations)
4809 : {
4810 123520 : Index resultRelation = lfirst_int(l);
4811 123520 : List *mergeActions = NIL;
4812 :
4813 123520 : if (mergeActionLists)
4814 1870 : mergeActions = list_nth(mergeActionLists, i);
4815 :
4816 123520 : if (resultRelInfo != mtstate->rootResultRelInfo)
4817 : {
4818 5348 : ExecInitResultRelation(estate, resultRelInfo, resultRelation);
4819 :
4820 : /*
4821 : * For child result relations, store the root result relation
4822 : * pointer. We do so for the convenience of places that want to
4823 : * look at the query's original target relation but don't have the
4824 : * mtstate handy.
4825 : */
4826 5348 : resultRelInfo->ri_RootResultRelInfo = mtstate->rootResultRelInfo;
4827 : }
4828 :
4829 : /* Initialize the usesFdwDirectModify flag */
4830 123520 : resultRelInfo->ri_usesFdwDirectModify =
4831 123520 : bms_is_member(i, node->fdwDirectModifyPlans);
4832 :
4833 : /*
4834 : * Verify result relation is a valid target for the current operation
4835 : */
4836 123520 : CheckValidResultRel(resultRelInfo, operation, node->onConflictAction,
4837 : mergeActions);
4838 :
4839 123184 : resultRelInfo++;
4840 123184 : i++;
4841 : }
4842 :
4843 : /*
4844 : * Now we may initialize the subplan.
4845 : */
4846 120744 : outerPlanState(mtstate) = ExecInitNode(subplan, estate, eflags);
4847 :
4848 : /*
4849 : * Do additional per-result-relation initialization.
4850 : */
4851 243894 : for (i = 0; i < nrels; i++)
4852 : {
4853 123150 : resultRelInfo = &mtstate->resultRelInfo[i];
4854 :
4855 : /* Let FDWs init themselves for foreign-table result rels */
4856 123150 : if (!resultRelInfo->ri_usesFdwDirectModify &&
4857 122942 : resultRelInfo->ri_FdwRoutine != NULL &&
4858 340 : resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
4859 : {
4860 340 : List *fdw_private = (List *) list_nth(node->fdwPrivLists, i);
4861 :
4862 340 : resultRelInfo->ri_FdwRoutine->BeginForeignModify(mtstate,
4863 : resultRelInfo,
4864 : fdw_private,
4865 : i,
4866 : eflags);
4867 : }
4868 :
4869 : /*
4870 : * For UPDATE/DELETE/MERGE, find the appropriate junk attr now, either
4871 : * a 'ctid' or 'wholerow' attribute depending on relkind. For foreign
4872 : * tables, the FDW might have created additional junk attr(s), but
4873 : * those are no concern of ours.
4874 : */
4875 123150 : if (operation == CMD_UPDATE || operation == CMD_DELETE ||
4876 : operation == CMD_MERGE)
4877 : {
4878 : char relkind;
4879 :
4880 30652 : relkind = resultRelInfo->ri_RelationDesc->rd_rel->relkind;
4881 30652 : if (relkind == RELKIND_RELATION ||
4882 696 : relkind == RELKIND_MATVIEW ||
4883 : relkind == RELKIND_PARTITIONED_TABLE)
4884 : {
4885 29992 : resultRelInfo->ri_RowIdAttNo =
4886 29992 : ExecFindJunkAttributeInTlist(subplan->targetlist, "ctid");
4887 29992 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4888 0 : elog(ERROR, "could not find junk ctid column");
4889 : }
4890 660 : else if (relkind == RELKIND_FOREIGN_TABLE)
4891 : {
4892 : /*
4893 : * We don't support MERGE with foreign tables for now. (It's
4894 : * problematic because the implementation uses CTID.)
4895 : */
4896 : Assert(operation != CMD_MERGE);
4897 :
4898 : /*
4899 : * When there is a row-level trigger, there should be a
4900 : * wholerow attribute. We also require it to be present in
4901 : * UPDATE and MERGE, so we can get the values of unchanged
4902 : * columns.
4903 : */
4904 372 : resultRelInfo->ri_RowIdAttNo =
4905 372 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4906 : "wholerow");
4907 372 : if ((mtstate->operation == CMD_UPDATE || mtstate->operation == CMD_MERGE) &&
4908 210 : !AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4909 0 : elog(ERROR, "could not find junk wholerow column");
4910 : }
4911 : else
4912 : {
4913 : /* Other valid target relkinds must provide wholerow */
4914 288 : resultRelInfo->ri_RowIdAttNo =
4915 288 : ExecFindJunkAttributeInTlist(subplan->targetlist,
4916 : "wholerow");
4917 288 : if (!AttributeNumberIsValid(resultRelInfo->ri_RowIdAttNo))
4918 0 : elog(ERROR, "could not find junk wholerow column");
4919 : }
4920 : }
4921 : }
4922 :
4923 : /*
4924 : * If this is an inherited update/delete/merge, there will be a junk
4925 : * attribute named "tableoid" present in the subplan's targetlist. It
4926 : * will be used to identify the result relation for a given tuple to be
4927 : * updated/deleted/merged.
4928 : */
4929 120744 : mtstate->mt_resultOidAttno =
4930 120744 : ExecFindJunkAttributeInTlist(subplan->targetlist, "tableoid");
4931 : Assert(AttributeNumberIsValid(mtstate->mt_resultOidAttno) || total_nrels == 1);
4932 120744 : mtstate->mt_lastResultOid = InvalidOid; /* force lookup at first tuple */
4933 120744 : mtstate->mt_lastResultIndex = 0; /* must be zero if no such attr */
4934 :
4935 : /* Get the root target relation */
4936 120744 : rel = mtstate->rootResultRelInfo->ri_RelationDesc;
4937 :
4938 : /*
4939 : * Build state for tuple routing if it's a partitioned INSERT. An UPDATE
4940 : * or MERGE might need this too, but only if it actually moves tuples
4941 : * between partitions; in that case setup is done by
4942 : * ExecCrossPartitionUpdate.
4943 : */
4944 120744 : if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
4945 : operation == CMD_INSERT)
4946 5636 : mtstate->mt_partition_tuple_routing =
4947 5636 : ExecSetupPartitionTupleRouting(estate, rel);
4948 :
4949 : /*
4950 : * Initialize any WITH CHECK OPTION constraints if needed.
4951 : */
4952 120744 : resultRelInfo = mtstate->resultRelInfo;
4953 122288 : foreach(l, withCheckOptionLists)
4954 : {
4955 1544 : List *wcoList = (List *) lfirst(l);
4956 1544 : List *wcoExprs = NIL;
4957 : ListCell *ll;
4958 :
4959 4478 : foreach(ll, wcoList)
4960 : {
4961 2934 : WithCheckOption *wco = (WithCheckOption *) lfirst(ll);
4962 2934 : ExprState *wcoExpr = ExecInitQual((List *) wco->qual,
4963 : &mtstate->ps);
4964 :
4965 2934 : wcoExprs = lappend(wcoExprs, wcoExpr);
4966 : }
4967 :
4968 1544 : resultRelInfo->ri_WithCheckOptions = wcoList;
4969 1544 : resultRelInfo->ri_WithCheckOptionExprs = wcoExprs;
4970 1544 : resultRelInfo++;
4971 : }
4972 :
4973 : /*
4974 : * Initialize RETURNING projections if needed.
4975 : */
4976 120744 : if (returningLists)
4977 : {
4978 : TupleTableSlot *slot;
4979 : ExprContext *econtext;
4980 :
4981 : /*
4982 : * Initialize result tuple slot and assign its rowtype using the plan
4983 : * node's declared targetlist, which the planner set up to be the same
4984 : * as the first (before runtime pruning) RETURNING list. We assume
4985 : * all the result rels will produce compatible output.
4986 : */
4987 4734 : ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
4988 4734 : slot = mtstate->ps.ps_ResultTupleSlot;
4989 :
4990 : /* Need an econtext too */
4991 4734 : if (mtstate->ps.ps_ExprContext == NULL)
4992 4734 : ExecAssignExprContext(estate, &mtstate->ps);
4993 4734 : econtext = mtstate->ps.ps_ExprContext;
4994 :
4995 : /*
4996 : * Build a projection for each result rel.
4997 : */
4998 4734 : resultRelInfo = mtstate->resultRelInfo;
4999 9818 : foreach(l, returningLists)
5000 : {
5001 5084 : List *rlist = (List *) lfirst(l);
5002 :
5003 5084 : resultRelInfo->ri_returningList = rlist;
5004 5084 : resultRelInfo->ri_projectReturning =
5005 5084 : ExecBuildProjectionInfo(rlist, econtext, slot, &mtstate->ps,
5006 5084 : resultRelInfo->ri_RelationDesc->rd_att);
5007 5084 : resultRelInfo++;
5008 : }
5009 : }
5010 : else
5011 : {
5012 : /*
5013 : * We still must construct a dummy result tuple type, because InitPlan
5014 : * expects one (maybe should change that?).
5015 : */
5016 116010 : ExecInitResultTypeTL(&mtstate->ps);
5017 :
5018 116010 : mtstate->ps.ps_ExprContext = NULL;
5019 : }
5020 :
5021 : /* Set the list of arbiter indexes if needed for ON CONFLICT */
5022 120744 : resultRelInfo = mtstate->resultRelInfo;
5023 120744 : if (node->onConflictAction != ONCONFLICT_NONE)
5024 : {
5025 : /* insert may only have one relation, inheritance is not expanded */
5026 : Assert(total_nrels == 1);
5027 1414 : resultRelInfo->ri_onConflictArbiterIndexes = node->arbiterIndexes;
5028 : }
5029 :
5030 : /*
5031 : * If needed, Initialize target list, projection and qual for ON CONFLICT
5032 : * DO UPDATE.
5033 : */
5034 120744 : if (node->onConflictAction == ONCONFLICT_UPDATE)
5035 : {
5036 940 : OnConflictSetState *onconfl = makeNode(OnConflictSetState);
5037 : ExprContext *econtext;
5038 : TupleDesc relationDesc;
5039 :
5040 : /* already exists if created by RETURNING processing above */
5041 940 : if (mtstate->ps.ps_ExprContext == NULL)
5042 650 : ExecAssignExprContext(estate, &mtstate->ps);
5043 :
5044 940 : econtext = mtstate->ps.ps_ExprContext;
5045 940 : relationDesc = resultRelInfo->ri_RelationDesc->rd_att;
5046 :
5047 : /* create state for DO UPDATE SET operation */
5048 940 : resultRelInfo->ri_onConflict = onconfl;
5049 :
5050 : /* initialize slot for the existing tuple */
5051 940 : onconfl->oc_Existing =
5052 940 : table_slot_create(resultRelInfo->ri_RelationDesc,
5053 940 : &mtstate->ps.state->es_tupleTable);
5054 :
5055 : /*
5056 : * Create the tuple slot for the UPDATE SET projection. We want a slot
5057 : * of the table's type here, because the slot will be used to insert
5058 : * into the table, and for RETURNING processing - which may access
5059 : * system attributes.
5060 : */
5061 940 : onconfl->oc_ProjSlot =
5062 940 : table_slot_create(resultRelInfo->ri_RelationDesc,
5063 940 : &mtstate->ps.state->es_tupleTable);
5064 :
5065 : /* build UPDATE SET projection state */
5066 940 : onconfl->oc_ProjInfo =
5067 940 : ExecBuildUpdateProjection(node->onConflictSet,
5068 : true,
5069 : node->onConflictCols,
5070 : relationDesc,
5071 : econtext,
5072 : onconfl->oc_ProjSlot,
5073 : &mtstate->ps);
5074 :
5075 : /* initialize state to evaluate the WHERE clause, if any */
5076 940 : if (node->onConflictWhere)
5077 : {
5078 : ExprState *qualexpr;
5079 :
5080 176 : qualexpr = ExecInitQual((List *) node->onConflictWhere,
5081 : &mtstate->ps);
5082 176 : onconfl->oc_WhereClause = qualexpr;
5083 : }
5084 : }
5085 :
5086 : /*
5087 : * If we have any secondary relations in an UPDATE or DELETE, they need to
5088 : * be treated like non-locked relations in SELECT FOR UPDATE, i.e., the
5089 : * EvalPlanQual mechanism needs to be told about them. This also goes for
5090 : * the source relations in a MERGE. Locate the relevant ExecRowMarks.
5091 : */
5092 120744 : arowmarks = NIL;
5093 123608 : foreach(l, node->rowMarks)
5094 : {
5095 2864 : PlanRowMark *rc = lfirst_node(PlanRowMark, l);
5096 : ExecRowMark *erm;
5097 : ExecAuxRowMark *aerm;
5098 :
5099 : /*
5100 : * Ignore "parent" rowmarks, because they are irrelevant at runtime.
5101 : * Also ignore the rowmarks belonging to child tables that have been
5102 : * pruned in ExecDoInitialPruning().
5103 : */
5104 2864 : if (rc->isParent ||
5105 2722 : !bms_is_member(rc->rti, estate->es_unpruned_relids))
5106 596 : continue;
5107 :
5108 : /* Find ExecRowMark and build ExecAuxRowMark */
5109 2268 : erm = ExecFindRowMark(estate, rc->rti, false);
5110 2268 : aerm = ExecBuildAuxRowMark(erm, subplan->targetlist);
5111 2268 : arowmarks = lappend(arowmarks, aerm);
5112 : }
5113 :
5114 : /* For a MERGE command, initialize its state */
5115 120744 : if (mtstate->operation == CMD_MERGE)
5116 1616 : ExecInitMerge(mtstate, estate);
5117 :
5118 120744 : EvalPlanQualSetPlan(&mtstate->mt_epqstate, subplan, arowmarks);
5119 :
5120 : /*
5121 : * If there are a lot of result relations, use a hash table to speed the
5122 : * lookups. If there are not a lot, a simple linear search is faster.
5123 : *
5124 : * It's not clear where the threshold is, but try 64 for starters. In a
5125 : * debugging build, use a small threshold so that we get some test
5126 : * coverage of both code paths.
5127 : */
5128 : #ifdef USE_ASSERT_CHECKING
5129 : #define MT_NRELS_HASH 4
5130 : #else
5131 : #define MT_NRELS_HASH 64
5132 : #endif
5133 120744 : if (nrels >= MT_NRELS_HASH)
5134 : {
5135 : HASHCTL hash_ctl;
5136 :
5137 0 : hash_ctl.keysize = sizeof(Oid);
5138 0 : hash_ctl.entrysize = sizeof(MTTargetRelLookup);
5139 0 : hash_ctl.hcxt = CurrentMemoryContext;
5140 0 : mtstate->mt_resultOidHash =
5141 0 : hash_create("ModifyTable target hash",
5142 : nrels, &hash_ctl,
5143 : HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
5144 0 : for (i = 0; i < nrels; i++)
5145 : {
5146 : Oid hashkey;
5147 : MTTargetRelLookup *mtlookup;
5148 : bool found;
5149 :
5150 0 : resultRelInfo = &mtstate->resultRelInfo[i];
5151 0 : hashkey = RelationGetRelid(resultRelInfo->ri_RelationDesc);
5152 : mtlookup = (MTTargetRelLookup *)
5153 0 : hash_search(mtstate->mt_resultOidHash, &hashkey,
5154 : HASH_ENTER, &found);
5155 : Assert(!found);
5156 0 : mtlookup->relationIndex = i;
5157 : }
5158 : }
5159 : else
5160 120744 : mtstate->mt_resultOidHash = NULL;
5161 :
5162 : /*
5163 : * Determine if the FDW supports batch insert and determine the batch size
5164 : * (a FDW may support batching, but it may be disabled for the
5165 : * server/table).
5166 : *
5167 : * We only do this for INSERT, so that for UPDATE/DELETE the batch size
5168 : * remains set to 0.
5169 : */
5170 120744 : if (operation == CMD_INSERT)
5171 : {
5172 : /* insert may only have one relation, inheritance is not expanded */
5173 : Assert(total_nrels == 1);
5174 92498 : resultRelInfo = mtstate->resultRelInfo;
5175 92498 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5176 92498 : resultRelInfo->ri_FdwRoutine != NULL &&
5177 176 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize &&
5178 176 : resultRelInfo->ri_FdwRoutine->ExecForeignBatchInsert)
5179 : {
5180 176 : resultRelInfo->ri_BatchSize =
5181 176 : resultRelInfo->ri_FdwRoutine->GetForeignModifyBatchSize(resultRelInfo);
5182 176 : Assert(resultRelInfo->ri_BatchSize >= 1);
5183 : }
5184 : else
5185 92322 : resultRelInfo->ri_BatchSize = 1;
5186 : }
5187 :
5188 : /*
5189 : * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
5190 : * to estate->es_auxmodifytables so that it will be run to completion by
5191 : * ExecPostprocessPlan. (It'd actually work fine to add the primary
5192 : * ModifyTable node too, but there's no need.) Note the use of lcons not
5193 : * lappend: we need later-initialized ModifyTable nodes to be shut down
5194 : * before earlier ones. This ensures that we don't throw away RETURNING
5195 : * rows that need to be seen by a later CTE subplan.
5196 : */
5197 120744 : if (!mtstate->canSetTag)
5198 956 : estate->es_auxmodifytables = lcons(mtstate,
5199 : estate->es_auxmodifytables);
5200 :
5201 120744 : return mtstate;
5202 : }
5203 :
5204 : /* ----------------------------------------------------------------
5205 : * ExecEndModifyTable
5206 : *
5207 : * Shuts down the plan.
5208 : *
5209 : * Returns nothing of interest.
5210 : * ----------------------------------------------------------------
5211 : */
5212 : void
5213 116292 : ExecEndModifyTable(ModifyTableState *node)
5214 : {
5215 : int i;
5216 :
5217 : /*
5218 : * Allow any FDWs to shut down
5219 : */
5220 234680 : for (i = 0; i < node->mt_nrels; i++)
5221 : {
5222 : int j;
5223 118388 : ResultRelInfo *resultRelInfo = node->resultRelInfo + i;
5224 :
5225 118388 : if (!resultRelInfo->ri_usesFdwDirectModify &&
5226 118196 : resultRelInfo->ri_FdwRoutine != NULL &&
5227 312 : resultRelInfo->ri_FdwRoutine->EndForeignModify != NULL)
5228 312 : resultRelInfo->ri_FdwRoutine->EndForeignModify(node->ps.state,
5229 : resultRelInfo);
5230 :
5231 : /*
5232 : * Cleanup the initialized batch slots. This only matters for FDWs
5233 : * with batching, but the other cases will have ri_NumSlotsInitialized
5234 : * == 0.
5235 : */
5236 118444 : for (j = 0; j < resultRelInfo->ri_NumSlotsInitialized; j++)
5237 : {
5238 56 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_Slots[j]);
5239 56 : ExecDropSingleTupleTableSlot(resultRelInfo->ri_PlanSlots[j]);
5240 : }
5241 : }
5242 :
5243 : /*
5244 : * Close all the partitioned tables, leaf partitions, and their indices
5245 : * and release the slot used for tuple routing, if set.
5246 : */
5247 116292 : if (node->mt_partition_tuple_routing)
5248 : {
5249 5696 : ExecCleanupTupleRouting(node, node->mt_partition_tuple_routing);
5250 :
5251 5696 : if (node->mt_root_tuple_slot)
5252 660 : ExecDropSingleTupleTableSlot(node->mt_root_tuple_slot);
5253 : }
5254 :
5255 : /*
5256 : * Terminate EPQ execution if active
5257 : */
5258 116292 : EvalPlanQualEnd(&node->mt_epqstate);
5259 :
5260 : /*
5261 : * shut down subplan
5262 : */
5263 116292 : ExecEndNode(outerPlanState(node));
5264 116292 : }
5265 :
5266 : void
5267 0 : ExecReScanModifyTable(ModifyTableState *node)
5268 : {
5269 : /*
5270 : * Currently, we don't need to support rescan on ModifyTable nodes. The
5271 : * semantics of that would be a bit debatable anyway.
5272 : */
5273 0 : elog(ERROR, "ExecReScanModifyTable is not implemented");
5274 : }
|