Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * trigger.c
4 : * PostgreSQL TRIGGERs support code.
5 : *
6 : * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7 : * Portions Copyright (c) 1994, Regents of the University of California
8 : *
9 : * IDENTIFICATION
10 : * src/backend/commands/trigger.c
11 : *
12 : *-------------------------------------------------------------------------
13 : */
14 : #include "postgres.h"
15 :
16 : #include "access/genam.h"
17 : #include "access/htup_details.h"
18 : #include "access/relation.h"
19 : #include "access/sysattr.h"
20 : #include "access/table.h"
21 : #include "access/tableam.h"
22 : #include "access/xact.h"
23 : #include "catalog/catalog.h"
24 : #include "catalog/dependency.h"
25 : #include "catalog/indexing.h"
26 : #include "catalog/objectaccess.h"
27 : #include "catalog/partition.h"
28 : #include "catalog/pg_constraint.h"
29 : #include "catalog/pg_inherits.h"
30 : #include "catalog/pg_proc.h"
31 : #include "catalog/pg_trigger.h"
32 : #include "catalog/pg_type.h"
33 : #include "commands/dbcommands.h"
34 : #include "commands/trigger.h"
35 : #include "executor/executor.h"
36 : #include "miscadmin.h"
37 : #include "nodes/bitmapset.h"
38 : #include "nodes/makefuncs.h"
39 : #include "optimizer/optimizer.h"
40 : #include "parser/parse_clause.h"
41 : #include "parser/parse_collate.h"
42 : #include "parser/parse_func.h"
43 : #include "parser/parse_relation.h"
44 : #include "partitioning/partdesc.h"
45 : #include "pgstat.h"
46 : #include "rewrite/rewriteManip.h"
47 : #include "storage/lmgr.h"
48 : #include "utils/acl.h"
49 : #include "utils/builtins.h"
50 : #include "utils/fmgroids.h"
51 : #include "utils/guc_hooks.h"
52 : #include "utils/inval.h"
53 : #include "utils/lsyscache.h"
54 : #include "utils/memutils.h"
55 : #include "utils/plancache.h"
56 : #include "utils/rel.h"
57 : #include "utils/snapmgr.h"
58 : #include "utils/syscache.h"
59 : #include "utils/tuplestore.h"
60 :
61 :
62 : /* GUC variables */
63 : int SessionReplicationRole = SESSION_REPLICATION_ROLE_ORIGIN;
64 :
65 : /* How many levels deep into trigger execution are we? */
66 : static int MyTriggerDepth = 0;
67 :
68 : /* Local function prototypes */
69 : static void renametrig_internal(Relation tgrel, Relation targetrel,
70 : HeapTuple trigtup, const char *newname,
71 : const char *expected_name);
72 : static void renametrig_partition(Relation tgrel, Oid partitionId,
73 : Oid parentTriggerOid, const char *newname,
74 : const char *expected_name);
75 : static void SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger);
76 : static bool GetTupleForTrigger(EState *estate,
77 : EPQState *epqstate,
78 : ResultRelInfo *relinfo,
79 : ItemPointer tid,
80 : LockTupleMode lockmode,
81 : TupleTableSlot *oldslot,
82 : TupleTableSlot **epqslot,
83 : TM_Result *tmresultp,
84 : TM_FailureData *tmfdp);
85 : static bool TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
86 : Trigger *trigger, TriggerEvent event,
87 : Bitmapset *modifiedCols,
88 : TupleTableSlot *oldslot, TupleTableSlot *newslot);
89 : static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
90 : int tgindx,
91 : FmgrInfo *finfo,
92 : Instrumentation *instr,
93 : MemoryContext per_tuple_context);
94 : static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
95 : ResultRelInfo *src_partinfo,
96 : ResultRelInfo *dst_partinfo,
97 : int event, bool row_trigger,
98 : TupleTableSlot *oldslot, TupleTableSlot *newslot,
99 : List *recheckIndexes, Bitmapset *modifiedCols,
100 : TransitionCaptureState *transition_capture,
101 : bool is_crosspart_update);
102 : static void AfterTriggerEnlargeQueryState(void);
103 : static bool before_stmt_triggers_fired(Oid relid, CmdType cmdType);
104 :
105 :
106 : /*
107 : * Create a trigger. Returns the address of the created trigger.
108 : *
109 : * queryString is the source text of the CREATE TRIGGER command.
110 : * This must be supplied if a whenClause is specified, else it can be NULL.
111 : *
112 : * relOid, if nonzero, is the relation on which the trigger should be
113 : * created. If zero, the name provided in the statement will be looked up.
114 : *
115 : * refRelOid, if nonzero, is the relation to which the constraint trigger
116 : * refers. If zero, the constraint relation name provided in the statement
117 : * will be looked up as needed.
118 : *
119 : * constraintOid, if nonzero, says that this trigger is being created
120 : * internally to implement that constraint. A suitable pg_depend entry will
121 : * be made to link the trigger to that constraint. constraintOid is zero when
122 : * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT
123 : * TRIGGER, we build a pg_constraint entry internally.)
124 : *
125 : * indexOid, if nonzero, is the OID of an index associated with the constraint.
126 : * We do nothing with this except store it into pg_trigger.tgconstrindid;
127 : * but when creating a trigger for a deferrable unique constraint on a
128 : * partitioned table, its children are looked up. Note we don't cope with
129 : * invalid indexes in that case.
130 : *
131 : * funcoid, if nonzero, is the OID of the function to invoke. When this is
132 : * given, stmt->funcname is ignored.
133 : *
134 : * parentTriggerOid, if nonzero, is a trigger that begets this one; so that
135 : * if that trigger is dropped, this one should be too. There are two cases
136 : * when a nonzero value is passed for this: 1) when this function recurses to
137 : * create the trigger on partitions, 2) when creating child foreign key
138 : * triggers; see CreateFKCheckTrigger() and createForeignKeyActionTriggers().
139 : *
140 : * If whenClause is passed, it is an already-transformed expression for
141 : * WHEN. In this case, we ignore any that may come in stmt->whenClause.
142 : *
143 : * If isInternal is true then this is an internally-generated trigger.
144 : * This argument sets the tgisinternal field of the pg_trigger entry, and
145 : * if true causes us to modify the given trigger name to ensure uniqueness.
146 : *
147 : * When isInternal is not true we require ACL_TRIGGER permissions on the
148 : * relation, as well as ACL_EXECUTE on the trigger function. For internal
149 : * triggers the caller must apply any required permission checks.
150 : *
151 : * When called on partitioned tables, this function recurses to create the
152 : * trigger on all the partitions, except if isInternal is true, in which
153 : * case caller is expected to execute recursion on its own. in_partition
154 : * indicates such a recursive call; outside callers should pass "false"
155 : * (but see CloneRowTriggersToPartition).
156 : */
157 : ObjectAddress
158 14620 : CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
159 : Oid relOid, Oid refRelOid, Oid constraintOid, Oid indexOid,
160 : Oid funcoid, Oid parentTriggerOid, Node *whenClause,
161 : bool isInternal, bool in_partition)
162 : {
163 : return
164 14620 : CreateTriggerFiringOn(stmt, queryString, relOid, refRelOid,
165 : constraintOid, indexOid, funcoid,
166 : parentTriggerOid, whenClause, isInternal,
167 : in_partition, TRIGGER_FIRES_ON_ORIGIN);
168 : }
169 :
170 : /*
171 : * Like the above; additionally the firing condition
172 : * (always/origin/replica/disabled) can be specified.
173 : */
174 : ObjectAddress
175 15436 : CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
176 : Oid relOid, Oid refRelOid, Oid constraintOid,
177 : Oid indexOid, Oid funcoid, Oid parentTriggerOid,
178 : Node *whenClause, bool isInternal, bool in_partition,
179 : char trigger_fires_when)
180 : {
181 : int16 tgtype;
182 : int ncolumns;
183 : int16 *columns;
184 : int2vector *tgattr;
185 : List *whenRtable;
186 : char *qual;
187 : Datum values[Natts_pg_trigger];
188 : bool nulls[Natts_pg_trigger];
189 : Relation rel;
190 : AclResult aclresult;
191 : Relation tgrel;
192 : Relation pgrel;
193 15436 : HeapTuple tuple = NULL;
194 : Oid funcrettype;
195 15436 : Oid trigoid = InvalidOid;
196 : char internaltrigname[NAMEDATALEN];
197 : char *trigname;
198 15436 : Oid constrrelid = InvalidOid;
199 : ObjectAddress myself,
200 : referenced;
201 15436 : char *oldtablename = NULL;
202 15436 : char *newtablename = NULL;
203 : bool partition_recurse;
204 15436 : bool trigger_exists = false;
205 15436 : Oid existing_constraint_oid = InvalidOid;
206 15436 : bool existing_isInternal = false;
207 15436 : bool existing_isClone = false;
208 :
209 15436 : if (OidIsValid(relOid))
210 12348 : rel = table_open(relOid, ShareRowExclusiveLock);
211 : else
212 3088 : rel = table_openrv(stmt->relation, ShareRowExclusiveLock);
213 :
214 : /*
215 : * Triggers must be on tables or views, and there are additional
216 : * relation-type-specific restrictions.
217 : */
218 15436 : if (rel->rd_rel->relkind == RELKIND_RELATION)
219 : {
220 : /* Tables can't have INSTEAD OF triggers */
221 12674 : if (stmt->timing != TRIGGER_TYPE_BEFORE &&
222 11386 : stmt->timing != TRIGGER_TYPE_AFTER)
223 18 : ereport(ERROR,
224 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
225 : errmsg("\"%s\" is a table",
226 : RelationGetRelationName(rel)),
227 : errdetail("Tables cannot have INSTEAD OF triggers.")));
228 : }
229 2762 : else if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
230 : {
231 : /* Partitioned tables can't have INSTEAD OF triggers */
232 2454 : if (stmt->timing != TRIGGER_TYPE_BEFORE &&
233 2352 : stmt->timing != TRIGGER_TYPE_AFTER)
234 6 : ereport(ERROR,
235 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
236 : errmsg("\"%s\" is a table",
237 : RelationGetRelationName(rel)),
238 : errdetail("Tables cannot have INSTEAD OF triggers.")));
239 :
240 : /*
241 : * FOR EACH ROW triggers have further restrictions
242 : */
243 2448 : if (stmt->row)
244 : {
245 : /*
246 : * Disallow use of transition tables.
247 : *
248 : * Note that we have another restriction about transition tables
249 : * in partitions; search for 'has_superclass' below for an
250 : * explanation. The check here is just to protect from the fact
251 : * that if we allowed it here, the creation would succeed for a
252 : * partitioned table with no partitions, but would be blocked by
253 : * the other restriction when the first partition was created,
254 : * which is very unfriendly behavior.
255 : */
256 2242 : if (stmt->transitionRels != NIL)
257 6 : ereport(ERROR,
258 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
259 : errmsg("\"%s\" is a partitioned table",
260 : RelationGetRelationName(rel)),
261 : errdetail("ROW triggers with transition tables are not supported on partitioned tables.")));
262 : }
263 : }
264 308 : else if (rel->rd_rel->relkind == RELKIND_VIEW)
265 : {
266 : /*
267 : * Views can have INSTEAD OF triggers (which we check below are
268 : * row-level), or statement-level BEFORE/AFTER triggers.
269 : */
270 204 : if (stmt->timing != TRIGGER_TYPE_INSTEAD && stmt->row)
271 36 : ereport(ERROR,
272 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
273 : errmsg("\"%s\" is a view",
274 : RelationGetRelationName(rel)),
275 : errdetail("Views cannot have row-level BEFORE or AFTER triggers.")));
276 : /* Disallow TRUNCATE triggers on VIEWs */
277 168 : if (TRIGGER_FOR_TRUNCATE(stmt->events))
278 12 : ereport(ERROR,
279 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
280 : errmsg("\"%s\" is a view",
281 : RelationGetRelationName(rel)),
282 : errdetail("Views cannot have TRUNCATE triggers.")));
283 : }
284 104 : else if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
285 : {
286 104 : if (stmt->timing != TRIGGER_TYPE_BEFORE &&
287 54 : stmt->timing != TRIGGER_TYPE_AFTER)
288 0 : ereport(ERROR,
289 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
290 : errmsg("\"%s\" is a foreign table",
291 : RelationGetRelationName(rel)),
292 : errdetail("Foreign tables cannot have INSTEAD OF triggers.")));
293 :
294 : /*
295 : * We disallow constraint triggers to protect the assumption that
296 : * triggers on FKs can't be deferred. See notes with AfterTriggers
297 : * data structures, below.
298 : */
299 104 : if (stmt->isconstraint)
300 6 : ereport(ERROR,
301 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
302 : errmsg("\"%s\" is a foreign table",
303 : RelationGetRelationName(rel)),
304 : errdetail("Foreign tables cannot have constraint triggers.")));
305 : }
306 : else
307 0 : ereport(ERROR,
308 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
309 : errmsg("relation \"%s\" cannot have triggers",
310 : RelationGetRelationName(rel)),
311 : errdetail_relkind_not_supported(rel->rd_rel->relkind)));
312 :
313 15352 : if (!allowSystemTableMods && IsSystemRelation(rel))
314 2 : ereport(ERROR,
315 : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
316 : errmsg("permission denied: \"%s\" is a system catalog",
317 : RelationGetRelationName(rel))));
318 :
319 15350 : if (stmt->isconstraint)
320 : {
321 : /*
322 : * We must take a lock on the target relation to protect against
323 : * concurrent drop. It's not clear that AccessShareLock is strong
324 : * enough, but we certainly need at least that much... otherwise, we
325 : * might end up creating a pg_constraint entry referencing a
326 : * nonexistent table.
327 : */
328 11682 : if (OidIsValid(refRelOid))
329 : {
330 11418 : LockRelationOid(refRelOid, AccessShareLock);
331 11418 : constrrelid = refRelOid;
332 : }
333 264 : else if (stmt->constrrel != NULL)
334 24 : constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock,
335 : false);
336 : }
337 :
338 : /* permission checks */
339 15350 : if (!isInternal)
340 : {
341 3818 : aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
342 : ACL_TRIGGER);
343 3818 : if (aclresult != ACLCHECK_OK)
344 0 : aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
345 0 : RelationGetRelationName(rel));
346 :
347 3818 : if (OidIsValid(constrrelid))
348 : {
349 42 : aclresult = pg_class_aclcheck(constrrelid, GetUserId(),
350 : ACL_TRIGGER);
351 42 : if (aclresult != ACLCHECK_OK)
352 0 : aclcheck_error(aclresult, get_relkind_objtype(get_rel_relkind(constrrelid)),
353 0 : get_rel_name(constrrelid));
354 : }
355 : }
356 :
357 : /*
358 : * When called on a partitioned table to create a FOR EACH ROW trigger
359 : * that's not internal, we create one trigger for each partition, too.
360 : *
361 : * For that, we'd better hold lock on all of them ahead of time.
362 : */
363 18192 : partition_recurse = !isInternal && stmt->row &&
364 2842 : rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE;
365 15350 : if (partition_recurse)
366 398 : list_free(find_all_inheritors(RelationGetRelid(rel),
367 : ShareRowExclusiveLock, NULL));
368 :
369 : /* Compute tgtype */
370 15350 : TRIGGER_CLEAR_TYPE(tgtype);
371 15350 : if (stmt->row)
372 14374 : TRIGGER_SETT_ROW(tgtype);
373 15350 : tgtype |= stmt->timing;
374 15350 : tgtype |= stmt->events;
375 :
376 : /* Disallow ROW-level TRUNCATE triggers */
377 15350 : if (TRIGGER_FOR_ROW(tgtype) && TRIGGER_FOR_TRUNCATE(tgtype))
378 0 : ereport(ERROR,
379 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
380 : errmsg("TRUNCATE FOR EACH ROW triggers are not supported")));
381 :
382 : /* INSTEAD triggers must be row-level, and can't have WHEN or columns */
383 15350 : if (TRIGGER_FOR_INSTEAD(tgtype))
384 : {
385 114 : if (!TRIGGER_FOR_ROW(tgtype))
386 6 : ereport(ERROR,
387 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
388 : errmsg("INSTEAD OF triggers must be FOR EACH ROW")));
389 108 : if (stmt->whenClause)
390 6 : ereport(ERROR,
391 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
392 : errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
393 102 : if (stmt->columns != NIL)
394 6 : ereport(ERROR,
395 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
396 : errmsg("INSTEAD OF triggers cannot have column lists")));
397 : }
398 :
399 : /*
400 : * We don't yet support naming ROW transition variables, but the parser
401 : * recognizes the syntax so we can give a nicer message here.
402 : *
403 : * Per standard, REFERENCING TABLE names are only allowed on AFTER
404 : * triggers. Per standard, REFERENCING ROW names are not allowed with FOR
405 : * EACH STATEMENT. Per standard, each OLD/NEW, ROW/TABLE permutation is
406 : * only allowed once. Per standard, OLD may not be specified when
407 : * creating a trigger only for INSERT, and NEW may not be specified when
408 : * creating a trigger only for DELETE.
409 : *
410 : * Notice that the standard allows an AFTER ... FOR EACH ROW trigger to
411 : * reference both ROW and TABLE transition data.
412 : */
413 15332 : if (stmt->transitionRels != NIL)
414 : {
415 418 : List *varList = stmt->transitionRels;
416 : ListCell *lc;
417 :
418 914 : foreach(lc, varList)
419 : {
420 544 : TriggerTransition *tt = lfirst_node(TriggerTransition, lc);
421 :
422 544 : if (!(tt->isTable))
423 0 : ereport(ERROR,
424 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
425 : errmsg("ROW variable naming in the REFERENCING clause is not supported"),
426 : errhint("Use OLD TABLE or NEW TABLE for naming transition tables.")));
427 :
428 : /*
429 : * Because of the above test, we omit further ROW-related testing
430 : * below. If we later allow naming OLD and NEW ROW variables,
431 : * adjustments will be needed below.
432 : */
433 :
434 544 : if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
435 6 : ereport(ERROR,
436 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
437 : errmsg("\"%s\" is a foreign table",
438 : RelationGetRelationName(rel)),
439 : errdetail("Triggers on foreign tables cannot have transition tables.")));
440 :
441 538 : if (rel->rd_rel->relkind == RELKIND_VIEW)
442 6 : ereport(ERROR,
443 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
444 : errmsg("\"%s\" is a view",
445 : RelationGetRelationName(rel)),
446 : errdetail("Triggers on views cannot have transition tables.")));
447 :
448 : /*
449 : * We currently don't allow row-level triggers with transition
450 : * tables on partition or inheritance children. Such triggers
451 : * would somehow need to see tuples converted to the format of the
452 : * table they're attached to, and it's not clear which subset of
453 : * tuples each child should see. See also the prohibitions in
454 : * ATExecAttachPartition() and ATExecAddInherit().
455 : */
456 532 : if (TRIGGER_FOR_ROW(tgtype) && has_superclass(rel->rd_id))
457 : {
458 : /* Use appropriate error message. */
459 12 : if (rel->rd_rel->relispartition)
460 6 : ereport(ERROR,
461 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
462 : errmsg("ROW triggers with transition tables are not supported on partitions")));
463 : else
464 6 : ereport(ERROR,
465 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
466 : errmsg("ROW triggers with transition tables are not supported on inheritance children")));
467 : }
468 :
469 520 : if (stmt->timing != TRIGGER_TYPE_AFTER)
470 0 : ereport(ERROR,
471 : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
472 : errmsg("transition table name can only be specified for an AFTER trigger")));
473 :
474 520 : if (TRIGGER_FOR_TRUNCATE(tgtype))
475 6 : ereport(ERROR,
476 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
477 : errmsg("TRUNCATE triggers with transition tables are not supported")));
478 :
479 : /*
480 : * We currently don't allow multi-event triggers ("INSERT OR
481 : * UPDATE") with transition tables, because it's not clear how to
482 : * handle INSERT ... ON CONFLICT statements which can fire both
483 : * INSERT and UPDATE triggers. We show the inserted tuples to
484 : * INSERT triggers and the updated tuples to UPDATE triggers, but
485 : * it's not yet clear what INSERT OR UPDATE trigger should see.
486 : * This restriction could be lifted if we can decide on the right
487 : * semantics in a later release.
488 : */
489 514 : if (((TRIGGER_FOR_INSERT(tgtype) ? 1 : 0) +
490 514 : (TRIGGER_FOR_UPDATE(tgtype) ? 1 : 0) +
491 514 : (TRIGGER_FOR_DELETE(tgtype) ? 1 : 0)) != 1)
492 6 : ereport(ERROR,
493 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
494 : errmsg("transition tables cannot be specified for triggers with more than one event")));
495 :
496 : /*
497 : * We currently don't allow column-specific triggers with
498 : * transition tables. Per spec, that seems to require
499 : * accumulating separate transition tables for each combination of
500 : * columns, which is a lot of work for a rather marginal feature.
501 : */
502 508 : if (stmt->columns != NIL)
503 6 : ereport(ERROR,
504 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
505 : errmsg("transition tables cannot be specified for triggers with column lists")));
506 :
507 : /*
508 : * We disallow constraint triggers with transition tables, to
509 : * protect the assumption that such triggers can't be deferred.
510 : * See notes with AfterTriggers data structures, below.
511 : *
512 : * Currently this is enforced by the grammar, so just Assert here.
513 : */
514 : Assert(!stmt->isconstraint);
515 :
516 502 : if (tt->isNew)
517 : {
518 264 : if (!(TRIGGER_FOR_INSERT(tgtype) ||
519 146 : TRIGGER_FOR_UPDATE(tgtype)))
520 0 : ereport(ERROR,
521 : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
522 : errmsg("NEW TABLE can only be specified for an INSERT or UPDATE trigger")));
523 :
524 264 : if (newtablename != NULL)
525 0 : ereport(ERROR,
526 : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
527 : errmsg("NEW TABLE cannot be specified multiple times")));
528 :
529 264 : newtablename = tt->name;
530 : }
531 : else
532 : {
533 238 : if (!(TRIGGER_FOR_DELETE(tgtype) ||
534 140 : TRIGGER_FOR_UPDATE(tgtype)))
535 6 : ereport(ERROR,
536 : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
537 : errmsg("OLD TABLE can only be specified for a DELETE or UPDATE trigger")));
538 :
539 232 : if (oldtablename != NULL)
540 0 : ereport(ERROR,
541 : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
542 : errmsg("OLD TABLE cannot be specified multiple times")));
543 :
544 232 : oldtablename = tt->name;
545 : }
546 : }
547 :
548 370 : if (newtablename != NULL && oldtablename != NULL &&
549 126 : strcmp(newtablename, oldtablename) == 0)
550 0 : ereport(ERROR,
551 : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
552 : errmsg("OLD TABLE name and NEW TABLE name cannot be the same")));
553 : }
554 :
555 : /*
556 : * Parse the WHEN clause, if any and we weren't passed an already
557 : * transformed one.
558 : *
559 : * Note that as a side effect, we fill whenRtable when parsing. If we got
560 : * an already parsed clause, this does not occur, which is what we want --
561 : * no point in adding redundant dependencies below.
562 : */
563 15284 : if (!whenClause && stmt->whenClause)
564 110 : {
565 : ParseState *pstate;
566 : ParseNamespaceItem *nsitem;
567 : List *varList;
568 : ListCell *lc;
569 :
570 : /* Set up a pstate to parse with */
571 146 : pstate = make_parsestate(NULL);
572 146 : pstate->p_sourcetext = queryString;
573 :
574 : /*
575 : * Set up nsitems for OLD and NEW references.
576 : *
577 : * 'OLD' must always have varno equal to 1 and 'NEW' equal to 2.
578 : */
579 146 : nsitem = addRangeTableEntryForRelation(pstate, rel,
580 : AccessShareLock,
581 : makeAlias("old", NIL),
582 : false, false);
583 146 : addNSItemToQuery(pstate, nsitem, false, true, true);
584 146 : nsitem = addRangeTableEntryForRelation(pstate, rel,
585 : AccessShareLock,
586 : makeAlias("new", NIL),
587 : false, false);
588 146 : addNSItemToQuery(pstate, nsitem, false, true, true);
589 :
590 : /* Transform expression. Copy to be sure we don't modify original */
591 146 : whenClause = transformWhereClause(pstate,
592 146 : copyObject(stmt->whenClause),
593 : EXPR_KIND_TRIGGER_WHEN,
594 : "WHEN");
595 : /* we have to fix its collations too */
596 146 : assign_expr_collations(pstate, whenClause);
597 :
598 : /*
599 : * Check for disallowed references to OLD/NEW.
600 : *
601 : * NB: pull_var_clause is okay here only because we don't allow
602 : * subselects in WHEN clauses; it would fail to examine the contents
603 : * of subselects.
604 : */
605 146 : varList = pull_var_clause(whenClause, 0);
606 300 : foreach(lc, varList)
607 : {
608 190 : Var *var = (Var *) lfirst(lc);
609 :
610 190 : switch (var->varno)
611 : {
612 74 : case PRS2_OLD_VARNO:
613 74 : if (!TRIGGER_FOR_ROW(tgtype))
614 6 : ereport(ERROR,
615 : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
616 : errmsg("statement trigger's WHEN condition cannot reference column values"),
617 : parser_errposition(pstate, var->location)));
618 68 : if (TRIGGER_FOR_INSERT(tgtype))
619 6 : ereport(ERROR,
620 : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
621 : errmsg("INSERT trigger's WHEN condition cannot reference OLD values"),
622 : parser_errposition(pstate, var->location)));
623 : /* system columns are okay here */
624 62 : break;
625 116 : case PRS2_NEW_VARNO:
626 116 : if (!TRIGGER_FOR_ROW(tgtype))
627 0 : ereport(ERROR,
628 : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
629 : errmsg("statement trigger's WHEN condition cannot reference column values"),
630 : parser_errposition(pstate, var->location)));
631 116 : if (TRIGGER_FOR_DELETE(tgtype))
632 6 : ereport(ERROR,
633 : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
634 : errmsg("DELETE trigger's WHEN condition cannot reference NEW values"),
635 : parser_errposition(pstate, var->location)));
636 110 : if (var->varattno < 0 && TRIGGER_FOR_BEFORE(tgtype))
637 6 : ereport(ERROR,
638 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
639 : errmsg("BEFORE trigger's WHEN condition cannot reference NEW system columns"),
640 : parser_errposition(pstate, var->location)));
641 104 : if (TRIGGER_FOR_BEFORE(tgtype) &&
642 34 : var->varattno == 0 &&
643 12 : RelationGetDescr(rel)->constr &&
644 6 : RelationGetDescr(rel)->constr->has_generated_stored)
645 6 : ereport(ERROR,
646 : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
647 : errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
648 : errdetail("A whole-row reference is used and the table contains generated columns."),
649 : parser_errposition(pstate, var->location)));
650 98 : if (TRIGGER_FOR_BEFORE(tgtype) &&
651 28 : var->varattno > 0 &&
652 22 : TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attgenerated)
653 6 : ereport(ERROR,
654 : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
655 : errmsg("BEFORE trigger's WHEN condition cannot reference NEW generated columns"),
656 : errdetail("Column \"%s\" is a generated column.",
657 : NameStr(TupleDescAttr(RelationGetDescr(rel), var->varattno - 1)->attname)),
658 : parser_errposition(pstate, var->location)));
659 92 : break;
660 0 : default:
661 : /* can't happen without add_missing_from, so just elog */
662 0 : elog(ERROR, "trigger WHEN condition cannot contain references to other relations");
663 : break;
664 : }
665 : }
666 :
667 : /* we'll need the rtable for recordDependencyOnExpr */
668 110 : whenRtable = pstate->p_rtable;
669 :
670 110 : qual = nodeToString(whenClause);
671 :
672 110 : free_parsestate(pstate);
673 : }
674 15138 : else if (!whenClause)
675 : {
676 15096 : whenClause = NULL;
677 15096 : whenRtable = NIL;
678 15096 : qual = NULL;
679 : }
680 : else
681 : {
682 42 : qual = nodeToString(whenClause);
683 42 : whenRtable = NIL;
684 : }
685 :
686 : /*
687 : * Find and validate the trigger function.
688 : */
689 15248 : if (!OidIsValid(funcoid))
690 14432 : funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
691 15248 : if (!isInternal)
692 : {
693 3716 : aclresult = object_aclcheck(ProcedureRelationId, funcoid, GetUserId(), ACL_EXECUTE);
694 3716 : if (aclresult != ACLCHECK_OK)
695 0 : aclcheck_error(aclresult, OBJECT_FUNCTION,
696 0 : NameListToString(stmt->funcname));
697 : }
698 15248 : funcrettype = get_func_rettype(funcoid);
699 15248 : if (funcrettype != TRIGGEROID)
700 0 : ereport(ERROR,
701 : (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
702 : errmsg("function %s must return type %s",
703 : NameListToString(stmt->funcname), "trigger")));
704 :
705 : /*
706 : * Scan pg_trigger to see if there is already a trigger of the same name.
707 : * Skip this for internally generated triggers, since we'll modify the
708 : * name to be unique below.
709 : *
710 : * NOTE that this is cool only because we have ShareRowExclusiveLock on
711 : * the relation, so the trigger set won't be changing underneath us.
712 : */
713 15248 : tgrel = table_open(TriggerRelationId, RowExclusiveLock);
714 15248 : if (!isInternal)
715 : {
716 : ScanKeyData skeys[2];
717 : SysScanDesc tgscan;
718 :
719 3716 : ScanKeyInit(&skeys[0],
720 : Anum_pg_trigger_tgrelid,
721 : BTEqualStrategyNumber, F_OIDEQ,
722 : ObjectIdGetDatum(RelationGetRelid(rel)));
723 :
724 3716 : ScanKeyInit(&skeys[1],
725 : Anum_pg_trigger_tgname,
726 : BTEqualStrategyNumber, F_NAMEEQ,
727 3716 : CStringGetDatum(stmt->trigname));
728 :
729 3716 : tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
730 : NULL, 2, skeys);
731 :
732 : /* There should be at most one matching tuple */
733 3716 : if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
734 : {
735 102 : Form_pg_trigger oldtrigger = (Form_pg_trigger) GETSTRUCT(tuple);
736 :
737 102 : trigoid = oldtrigger->oid;
738 102 : existing_constraint_oid = oldtrigger->tgconstraint;
739 102 : existing_isInternal = oldtrigger->tgisinternal;
740 102 : existing_isClone = OidIsValid(oldtrigger->tgparentid);
741 102 : trigger_exists = true;
742 : /* copy the tuple to use in CatalogTupleUpdate() */
743 102 : tuple = heap_copytuple(tuple);
744 : }
745 3716 : systable_endscan(tgscan);
746 : }
747 :
748 15248 : if (!trigger_exists)
749 : {
750 : /* Generate the OID for the new trigger. */
751 15146 : trigoid = GetNewOidWithIndex(tgrel, TriggerOidIndexId,
752 : Anum_pg_trigger_oid);
753 : }
754 : else
755 : {
756 : /*
757 : * If OR REPLACE was specified, we'll replace the old trigger;
758 : * otherwise complain about the duplicate name.
759 : */
760 102 : if (!stmt->replace)
761 18 : ereport(ERROR,
762 : (errcode(ERRCODE_DUPLICATE_OBJECT),
763 : errmsg("trigger \"%s\" for relation \"%s\" already exists",
764 : stmt->trigname, RelationGetRelationName(rel))));
765 :
766 : /*
767 : * An internal trigger or a child trigger (isClone) cannot be replaced
768 : * by a user-defined trigger. However, skip this test when
769 : * in_partition, because then we're recursing from a partitioned table
770 : * and the check was made at the parent level.
771 : */
772 84 : if ((existing_isInternal || existing_isClone) &&
773 60 : !isInternal && !in_partition)
774 6 : ereport(ERROR,
775 : (errcode(ERRCODE_DUPLICATE_OBJECT),
776 : errmsg("trigger \"%s\" for relation \"%s\" is an internal or a child trigger",
777 : stmt->trigname, RelationGetRelationName(rel))));
778 :
779 : /*
780 : * It is not allowed to replace with a constraint trigger; gram.y
781 : * should have enforced this already.
782 : */
783 : Assert(!stmt->isconstraint);
784 :
785 : /*
786 : * It is not allowed to replace an existing constraint trigger,
787 : * either. (The reason for these restrictions is partly that it seems
788 : * difficult to deal with pending trigger events in such cases, and
789 : * partly that the command might imply changing the constraint's
790 : * properties as well, which doesn't seem nice.)
791 : */
792 78 : if (OidIsValid(existing_constraint_oid))
793 0 : ereport(ERROR,
794 : (errcode(ERRCODE_DUPLICATE_OBJECT),
795 : errmsg("trigger \"%s\" for relation \"%s\" is a constraint trigger",
796 : stmt->trigname, RelationGetRelationName(rel))));
797 : }
798 :
799 : /*
800 : * If it's a user-entered CREATE CONSTRAINT TRIGGER command, make a
801 : * corresponding pg_constraint entry.
802 : */
803 15224 : if (stmt->isconstraint && !OidIsValid(constraintOid))
804 : {
805 : /* Internal callers should have made their own constraints */
806 : Assert(!isInternal);
807 150 : constraintOid = CreateConstraintEntry(stmt->trigname,
808 150 : RelationGetNamespace(rel),
809 : CONSTRAINT_TRIGGER,
810 150 : stmt->deferrable,
811 150 : stmt->initdeferred,
812 : true,
813 : InvalidOid, /* no parent */
814 : RelationGetRelid(rel),
815 : NULL, /* no conkey */
816 : 0,
817 : 0,
818 : InvalidOid, /* no domain */
819 : InvalidOid, /* no index */
820 : InvalidOid, /* no foreign key */
821 : NULL,
822 : NULL,
823 : NULL,
824 : NULL,
825 : 0,
826 : ' ',
827 : ' ',
828 : NULL,
829 : 0,
830 : ' ',
831 : NULL, /* no exclusion */
832 : NULL, /* no check constraint */
833 : NULL,
834 : true, /* islocal */
835 : 0, /* inhcount */
836 : true, /* noinherit */
837 : false, /* conperiod */
838 : isInternal); /* is_internal */
839 : }
840 :
841 : /*
842 : * If trigger is internally generated, modify the provided trigger name to
843 : * ensure uniqueness by appending the trigger OID. (Callers will usually
844 : * supply a simple constant trigger name in these cases.)
845 : */
846 15224 : if (isInternal)
847 : {
848 11532 : snprintf(internaltrigname, sizeof(internaltrigname),
849 : "%s_%u", stmt->trigname, trigoid);
850 11532 : trigname = internaltrigname;
851 : }
852 : else
853 : {
854 : /* user-defined trigger; use the specified trigger name as-is */
855 3692 : trigname = stmt->trigname;
856 : }
857 :
858 : /*
859 : * Build the new pg_trigger tuple.
860 : */
861 15224 : memset(nulls, false, sizeof(nulls));
862 :
863 15224 : values[Anum_pg_trigger_oid - 1] = ObjectIdGetDatum(trigoid);
864 15224 : values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
865 15224 : values[Anum_pg_trigger_tgparentid - 1] = ObjectIdGetDatum(parentTriggerOid);
866 15224 : values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
867 : CStringGetDatum(trigname));
868 15224 : values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
869 15224 : values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
870 15224 : values[Anum_pg_trigger_tgenabled - 1] = trigger_fires_when;
871 15224 : values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
872 15224 : values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
873 15224 : values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
874 15224 : values[Anum_pg_trigger_tgconstraint - 1] = ObjectIdGetDatum(constraintOid);
875 15224 : values[Anum_pg_trigger_tgdeferrable - 1] = BoolGetDatum(stmt->deferrable);
876 15224 : values[Anum_pg_trigger_tginitdeferred - 1] = BoolGetDatum(stmt->initdeferred);
877 :
878 15224 : if (stmt->args)
879 : {
880 : ListCell *le;
881 : char *args;
882 484 : int16 nargs = list_length(stmt->args);
883 484 : int len = 0;
884 :
885 1250 : foreach(le, stmt->args)
886 : {
887 766 : char *ar = strVal(lfirst(le));
888 :
889 766 : len += strlen(ar) + 4;
890 6242 : for (; *ar; ar++)
891 : {
892 5476 : if (*ar == '\\')
893 0 : len++;
894 : }
895 : }
896 484 : args = (char *) palloc(len + 1);
897 484 : args[0] = '\0';
898 1250 : foreach(le, stmt->args)
899 : {
900 766 : char *s = strVal(lfirst(le));
901 766 : char *d = args + strlen(args);
902 :
903 6242 : while (*s)
904 : {
905 5476 : if (*s == '\\')
906 0 : *d++ = '\\';
907 5476 : *d++ = *s++;
908 : }
909 766 : strcpy(d, "\\000");
910 : }
911 484 : values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
912 484 : values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
913 : CStringGetDatum(args));
914 : }
915 : else
916 : {
917 14740 : values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(0);
918 14740 : values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
919 : CStringGetDatum(""));
920 : }
921 :
922 : /* build column number array if it's a column-specific trigger */
923 15224 : ncolumns = list_length(stmt->columns);
924 15224 : if (ncolumns == 0)
925 15124 : columns = NULL;
926 : else
927 : {
928 : ListCell *cell;
929 100 : int i = 0;
930 :
931 100 : columns = (int16 *) palloc(ncolumns * sizeof(int16));
932 208 : foreach(cell, stmt->columns)
933 : {
934 114 : char *name = strVal(lfirst(cell));
935 : int16 attnum;
936 : int j;
937 :
938 : /* Lookup column name. System columns are not allowed */
939 114 : attnum = attnameAttNum(rel, name, false);
940 114 : if (attnum == InvalidAttrNumber)
941 0 : ereport(ERROR,
942 : (errcode(ERRCODE_UNDEFINED_COLUMN),
943 : errmsg("column \"%s\" of relation \"%s\" does not exist",
944 : name, RelationGetRelationName(rel))));
945 :
946 : /* Check for duplicates */
947 122 : for (j = i - 1; j >= 0; j--)
948 : {
949 14 : if (columns[j] == attnum)
950 6 : ereport(ERROR,
951 : (errcode(ERRCODE_DUPLICATE_COLUMN),
952 : errmsg("column \"%s\" specified more than once",
953 : name)));
954 : }
955 :
956 108 : columns[i++] = attnum;
957 : }
958 : }
959 15218 : tgattr = buildint2vector(columns, ncolumns);
960 15218 : values[Anum_pg_trigger_tgattr - 1] = PointerGetDatum(tgattr);
961 :
962 : /* set tgqual if trigger has WHEN clause */
963 15218 : if (qual)
964 152 : values[Anum_pg_trigger_tgqual - 1] = CStringGetTextDatum(qual);
965 : else
966 15066 : nulls[Anum_pg_trigger_tgqual - 1] = true;
967 :
968 15218 : if (oldtablename)
969 232 : values[Anum_pg_trigger_tgoldtable - 1] = DirectFunctionCall1(namein,
970 : CStringGetDatum(oldtablename));
971 : else
972 14986 : nulls[Anum_pg_trigger_tgoldtable - 1] = true;
973 15218 : if (newtablename)
974 264 : values[Anum_pg_trigger_tgnewtable - 1] = DirectFunctionCall1(namein,
975 : CStringGetDatum(newtablename));
976 : else
977 14954 : nulls[Anum_pg_trigger_tgnewtable - 1] = true;
978 :
979 : /*
980 : * Insert or replace tuple in pg_trigger.
981 : */
982 15218 : if (!trigger_exists)
983 : {
984 15140 : tuple = heap_form_tuple(tgrel->rd_att, values, nulls);
985 15140 : CatalogTupleInsert(tgrel, tuple);
986 : }
987 : else
988 : {
989 : HeapTuple newtup;
990 :
991 78 : newtup = heap_form_tuple(tgrel->rd_att, values, nulls);
992 78 : CatalogTupleUpdate(tgrel, &tuple->t_self, newtup);
993 78 : heap_freetuple(newtup);
994 : }
995 :
996 15218 : heap_freetuple(tuple); /* free either original or new tuple */
997 15218 : table_close(tgrel, RowExclusiveLock);
998 :
999 15218 : pfree(DatumGetPointer(values[Anum_pg_trigger_tgname - 1]));
1000 15218 : pfree(DatumGetPointer(values[Anum_pg_trigger_tgargs - 1]));
1001 15218 : pfree(DatumGetPointer(values[Anum_pg_trigger_tgattr - 1]));
1002 15218 : if (oldtablename)
1003 232 : pfree(DatumGetPointer(values[Anum_pg_trigger_tgoldtable - 1]));
1004 15218 : if (newtablename)
1005 264 : pfree(DatumGetPointer(values[Anum_pg_trigger_tgnewtable - 1]));
1006 :
1007 : /*
1008 : * Update relation's pg_class entry; if necessary; and if not, send an SI
1009 : * message to make other backends (and this one) rebuild relcache entries.
1010 : */
1011 15218 : pgrel = table_open(RelationRelationId, RowExclusiveLock);
1012 15218 : tuple = SearchSysCacheCopy1(RELOID,
1013 : ObjectIdGetDatum(RelationGetRelid(rel)));
1014 15218 : if (!HeapTupleIsValid(tuple))
1015 0 : elog(ERROR, "cache lookup failed for relation %u",
1016 : RelationGetRelid(rel));
1017 15218 : if (!((Form_pg_class) GETSTRUCT(tuple))->relhastriggers)
1018 : {
1019 5950 : ((Form_pg_class) GETSTRUCT(tuple))->relhastriggers = true;
1020 :
1021 5950 : CatalogTupleUpdate(pgrel, &tuple->t_self, tuple);
1022 :
1023 5950 : CommandCounterIncrement();
1024 : }
1025 : else
1026 9268 : CacheInvalidateRelcacheByTuple(tuple);
1027 :
1028 15218 : heap_freetuple(tuple);
1029 15218 : table_close(pgrel, RowExclusiveLock);
1030 :
1031 : /*
1032 : * If we're replacing a trigger, flush all the old dependencies before
1033 : * recording new ones.
1034 : */
1035 15218 : if (trigger_exists)
1036 78 : deleteDependencyRecordsFor(TriggerRelationId, trigoid, true);
1037 :
1038 : /*
1039 : * Record dependencies for trigger. Always place a normal dependency on
1040 : * the function.
1041 : */
1042 15218 : myself.classId = TriggerRelationId;
1043 15218 : myself.objectId = trigoid;
1044 15218 : myself.objectSubId = 0;
1045 :
1046 15218 : referenced.classId = ProcedureRelationId;
1047 15218 : referenced.objectId = funcoid;
1048 15218 : referenced.objectSubId = 0;
1049 15218 : recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1050 :
1051 15218 : if (isInternal && OidIsValid(constraintOid))
1052 : {
1053 : /*
1054 : * Internally-generated trigger for a constraint, so make it an
1055 : * internal dependency of the constraint. We can skip depending on
1056 : * the relation(s), as there'll be an indirect dependency via the
1057 : * constraint.
1058 : */
1059 11532 : referenced.classId = ConstraintRelationId;
1060 11532 : referenced.objectId = constraintOid;
1061 11532 : referenced.objectSubId = 0;
1062 11532 : recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
1063 : }
1064 : else
1065 : {
1066 : /*
1067 : * User CREATE TRIGGER, so place dependencies. We make trigger be
1068 : * auto-dropped if its relation is dropped or if the FK relation is
1069 : * dropped. (Auto drop is compatible with our pre-7.3 behavior.)
1070 : */
1071 3686 : referenced.classId = RelationRelationId;
1072 3686 : referenced.objectId = RelationGetRelid(rel);
1073 3686 : referenced.objectSubId = 0;
1074 3686 : recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1075 :
1076 3686 : if (OidIsValid(constrrelid))
1077 : {
1078 42 : referenced.classId = RelationRelationId;
1079 42 : referenced.objectId = constrrelid;
1080 42 : referenced.objectSubId = 0;
1081 42 : recordDependencyOn(&myself, &referenced, DEPENDENCY_AUTO);
1082 : }
1083 : /* Not possible to have an index dependency in this case */
1084 : Assert(!OidIsValid(indexOid));
1085 :
1086 : /*
1087 : * If it's a user-specified constraint trigger, make the constraint
1088 : * internally dependent on the trigger instead of vice versa.
1089 : */
1090 3686 : if (OidIsValid(constraintOid))
1091 : {
1092 150 : referenced.classId = ConstraintRelationId;
1093 150 : referenced.objectId = constraintOid;
1094 150 : referenced.objectSubId = 0;
1095 150 : recordDependencyOn(&referenced, &myself, DEPENDENCY_INTERNAL);
1096 : }
1097 :
1098 : /*
1099 : * If it's a partition trigger, create the partition dependencies.
1100 : */
1101 3686 : if (OidIsValid(parentTriggerOid))
1102 : {
1103 804 : ObjectAddressSet(referenced, TriggerRelationId, parentTriggerOid);
1104 804 : recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
1105 804 : ObjectAddressSet(referenced, RelationRelationId, RelationGetRelid(rel));
1106 804 : recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_SEC);
1107 : }
1108 : }
1109 :
1110 : /* If column-specific trigger, add normal dependencies on columns */
1111 15218 : if (columns != NULL)
1112 : {
1113 : int i;
1114 :
1115 94 : referenced.classId = RelationRelationId;
1116 94 : referenced.objectId = RelationGetRelid(rel);
1117 196 : for (i = 0; i < ncolumns; i++)
1118 : {
1119 102 : referenced.objectSubId = columns[i];
1120 102 : recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
1121 : }
1122 : }
1123 :
1124 : /*
1125 : * If it has a WHEN clause, add dependencies on objects mentioned in the
1126 : * expression (eg, functions, as well as any columns used).
1127 : */
1128 15218 : if (whenRtable != NIL)
1129 110 : recordDependencyOnExpr(&myself, whenClause, whenRtable,
1130 : DEPENDENCY_NORMAL);
1131 :
1132 : /* Post creation hook for new trigger */
1133 15218 : InvokeObjectPostCreateHookArg(TriggerRelationId, trigoid, 0,
1134 : isInternal);
1135 :
1136 : /*
1137 : * Lastly, create the trigger on child relations, if needed.
1138 : */
1139 15218 : if (partition_recurse)
1140 : {
1141 386 : PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
1142 : int i;
1143 : MemoryContext oldcxt,
1144 : perChildCxt;
1145 :
1146 386 : perChildCxt = AllocSetContextCreate(CurrentMemoryContext,
1147 : "part trig clone",
1148 : ALLOCSET_SMALL_SIZES);
1149 :
1150 : /*
1151 : * We don't currently expect to be called with a valid indexOid. If
1152 : * that ever changes then we'll need to write code here to find the
1153 : * corresponding child index.
1154 : */
1155 : Assert(!OidIsValid(indexOid));
1156 :
1157 386 : oldcxt = MemoryContextSwitchTo(perChildCxt);
1158 :
1159 : /* Iterate to create the trigger on each existing partition */
1160 1040 : for (i = 0; i < partdesc->nparts; i++)
1161 : {
1162 : CreateTrigStmt *childStmt;
1163 : Relation childTbl;
1164 : Node *qual;
1165 :
1166 660 : childTbl = table_open(partdesc->oids[i], ShareRowExclusiveLock);
1167 :
1168 : /*
1169 : * Initialize our fabricated parse node by copying the original
1170 : * one, then resetting fields that we pass separately.
1171 : */
1172 660 : childStmt = copyObject(stmt);
1173 660 : childStmt->funcname = NIL;
1174 660 : childStmt->whenClause = NULL;
1175 :
1176 : /* If there is a WHEN clause, create a modified copy of it */
1177 660 : qual = copyObject(whenClause);
1178 : qual = (Node *)
1179 660 : map_partition_varattnos((List *) qual, PRS2_OLD_VARNO,
1180 : childTbl, rel);
1181 : qual = (Node *)
1182 660 : map_partition_varattnos((List *) qual, PRS2_NEW_VARNO,
1183 : childTbl, rel);
1184 :
1185 660 : CreateTriggerFiringOn(childStmt, queryString,
1186 660 : partdesc->oids[i], refRelOid,
1187 : InvalidOid, InvalidOid,
1188 : funcoid, trigoid, qual,
1189 : isInternal, true, trigger_fires_when);
1190 :
1191 654 : table_close(childTbl, NoLock);
1192 :
1193 654 : MemoryContextReset(perChildCxt);
1194 : }
1195 :
1196 380 : MemoryContextSwitchTo(oldcxt);
1197 380 : MemoryContextDelete(perChildCxt);
1198 : }
1199 :
1200 : /* Keep lock on target rel until end of xact */
1201 15212 : table_close(rel, NoLock);
1202 :
1203 15212 : return myself;
1204 : }
1205 :
1206 : /*
1207 : * TriggerSetParentTrigger
1208 : * Set a partition's trigger as child of its parent trigger,
1209 : * or remove the linkage if parentTrigId is InvalidOid.
1210 : *
1211 : * This updates the constraint's pg_trigger row to show it as inherited, and
1212 : * adds PARTITION dependencies to prevent the trigger from being deleted
1213 : * on its own. Alternatively, reverse that.
1214 : */
void
TriggerSetParentTrigger(Relation trigRel,
						Oid childTrigId,
						Oid parentTrigId,
						Oid childTableId)
{
	SysScanDesc tgscan;
	ScanKeyData skey[1];
	Form_pg_trigger trigForm;
	HeapTuple	tuple,
				newtup;
	ObjectAddress depender;
	ObjectAddress referenced;

	/*
	 * Find the trigger to modify.  (Caller must already hold an appropriate
	 * lock on pg_trigger, passed in as trigRel.)
	 */
	ScanKeyInit(&skey[0],
				Anum_pg_trigger_oid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(childTrigId));

	tgscan = systable_beginscan(trigRel, TriggerOidIndexId, true,
								NULL, 1, skey);

	tuple = systable_getnext(tgscan);
	if (!HeapTupleIsValid(tuple))
		elog(ERROR, "could not find tuple for trigger %u", childTrigId);

	/* Work on a modifiable copy of the pg_trigger tuple */
	newtup = heap_copytuple(tuple);
	trigForm = (Form_pg_trigger) GETSTRUCT(newtup);
	if (OidIsValid(parentTrigId))
	{
		/* don't allow setting parent for a constraint that already has one */
		if (OidIsValid(trigForm->tgparentid))
			elog(ERROR, "trigger %u already has a parent trigger",
				 childTrigId);

		trigForm->tgparentid = parentTrigId;

		CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);

		/*
		 * Add PARTITION dependencies on the parent trigger and on the child
		 * table, so the child trigger cannot be dropped independently.
		 */
		ObjectAddressSet(depender, TriggerRelationId, childTrigId);

		ObjectAddressSet(referenced, TriggerRelationId, parentTrigId);
		recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_PRI);

		ObjectAddressSet(referenced, RelationRelationId, childTableId);
		recordDependencyOn(&depender, &referenced, DEPENDENCY_PARTITION_SEC);
	}
	else
	{
		/* Detach: clear the parent link ... */
		trigForm->tgparentid = InvalidOid;

		CatalogTupleUpdate(trigRel, &tuple->t_self, newtup);

		/* ... and remove the partition dependencies added at attach time */
		deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
										TriggerRelationId,
										DEPENDENCY_PARTITION_PRI);
		deleteDependencyRecordsForClass(TriggerRelationId, childTrigId,
										RelationRelationId,
										DEPENDENCY_PARTITION_SEC);
	}

	heap_freetuple(newtup);
	systable_endscan(tgscan);
}
1281 :
1282 :
1283 : /*
1284 : * Guts of trigger deletion.
1285 : */
void
RemoveTriggerById(Oid trigOid)
{
	Relation	tgrel;
	SysScanDesc tgscan;
	ScanKeyData skey[1];
	HeapTuple	tup;
	Oid			relid;
	Relation	rel;

	tgrel = table_open(TriggerRelationId, RowExclusiveLock);

	/*
	 * Find the trigger to delete.
	 */
	ScanKeyInit(&skey[0],
				Anum_pg_trigger_oid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(trigOid));

	tgscan = systable_beginscan(tgrel, TriggerOidIndexId, true,
								NULL, 1, skey);

	tup = systable_getnext(tgscan);
	if (!HeapTupleIsValid(tup))
		elog(ERROR, "could not find tuple for trigger %u", trigOid);

	/*
	 * Open and exclusive-lock the relation the trigger belongs to.
	 */
	relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;

	rel = table_open(relid, AccessExclusiveLock);

	/* Sanity check: only these relkinds can carry triggers */
	if (rel->rd_rel->relkind != RELKIND_RELATION &&
		rel->rd_rel->relkind != RELKIND_VIEW &&
		rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE &&
		rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("relation \"%s\" cannot have triggers",
						RelationGetRelationName(rel)),
				 errdetail_relkind_not_supported(rel->rd_rel->relkind)));

	/* Modifying triggers on system catalogs requires allow_system_table_mods */
	if (!allowSystemTableMods && IsSystemRelation(rel))
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 errmsg("permission denied: \"%s\" is a system catalog",
						RelationGetRelationName(rel))));

	/*
	 * Delete the pg_trigger tuple.
	 */
	CatalogTupleDelete(tgrel, &tup->t_self);

	systable_endscan(tgscan);
	table_close(tgrel, RowExclusiveLock);

	/*
	 * We do not bother to try to determine whether any other triggers remain,
	 * which would be needed in order to decide whether it's safe to clear the
	 * relation's relhastriggers.  (In any case, there might be a concurrent
	 * process adding new triggers.)  Instead, just force a relcache inval to
	 * make other backends (and this one too!) rebuild their relcache entries.
	 * There's no great harm in leaving relhastriggers true even if there are
	 * no triggers left.
	 */
	CacheInvalidateRelcache(rel);

	/* Keep lock on trigger's rel until end of xact */
	table_close(rel, NoLock);
}
1358 :
1359 : /*
1360 : * get_trigger_oid - Look up a trigger by name to find its OID.
1361 : *
1362 : * If missing_ok is false, throw an error if trigger not found. If
1363 : * true, just return InvalidOid.
1364 : */
1365 : Oid
1366 752 : get_trigger_oid(Oid relid, const char *trigname, bool missing_ok)
1367 : {
1368 : Relation tgrel;
1369 : ScanKeyData skey[2];
1370 : SysScanDesc tgscan;
1371 : HeapTuple tup;
1372 : Oid oid;
1373 :
1374 : /*
1375 : * Find the trigger, verify permissions, set up object address
1376 : */
1377 752 : tgrel = table_open(TriggerRelationId, AccessShareLock);
1378 :
1379 752 : ScanKeyInit(&skey[0],
1380 : Anum_pg_trigger_tgrelid,
1381 : BTEqualStrategyNumber, F_OIDEQ,
1382 : ObjectIdGetDatum(relid));
1383 752 : ScanKeyInit(&skey[1],
1384 : Anum_pg_trigger_tgname,
1385 : BTEqualStrategyNumber, F_NAMEEQ,
1386 : CStringGetDatum(trigname));
1387 :
1388 752 : tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1389 : NULL, 2, skey);
1390 :
1391 752 : tup = systable_getnext(tgscan);
1392 :
1393 752 : if (!HeapTupleIsValid(tup))
1394 : {
1395 30 : if (!missing_ok)
1396 24 : ereport(ERROR,
1397 : (errcode(ERRCODE_UNDEFINED_OBJECT),
1398 : errmsg("trigger \"%s\" for table \"%s\" does not exist",
1399 : trigname, get_rel_name(relid))));
1400 6 : oid = InvalidOid;
1401 : }
1402 : else
1403 : {
1404 722 : oid = ((Form_pg_trigger) GETSTRUCT(tup))->oid;
1405 : }
1406 :
1407 728 : systable_endscan(tgscan);
1408 728 : table_close(tgrel, AccessShareLock);
1409 728 : return oid;
1410 : }
1411 :
1412 : /*
1413 : * Perform permissions and integrity checks before acquiring a relation lock.
1414 : */
1415 : static void
1416 42 : RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
1417 : void *arg)
1418 : {
1419 : HeapTuple tuple;
1420 : Form_pg_class form;
1421 :
1422 42 : tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
1423 42 : if (!HeapTupleIsValid(tuple))
1424 0 : return; /* concurrently dropped */
1425 42 : form = (Form_pg_class) GETSTRUCT(tuple);
1426 :
1427 : /* only tables and views can have triggers */
1428 42 : if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW &&
1429 24 : form->relkind != RELKIND_FOREIGN_TABLE &&
1430 24 : form->relkind != RELKIND_PARTITIONED_TABLE)
1431 0 : ereport(ERROR,
1432 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1433 : errmsg("relation \"%s\" cannot have triggers",
1434 : rv->relname),
1435 : errdetail_relkind_not_supported(form->relkind)));
1436 :
1437 : /* you must own the table to rename one of its triggers */
1438 42 : if (!object_ownercheck(RelationRelationId, relid, GetUserId()))
1439 0 : aclcheck_error(ACLCHECK_NOT_OWNER, get_relkind_objtype(get_rel_relkind(relid)), rv->relname);
1440 42 : if (!allowSystemTableMods && IsSystemClass(relid, form))
1441 2 : ereport(ERROR,
1442 : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
1443 : errmsg("permission denied: \"%s\" is a system catalog",
1444 : rv->relname)));
1445 :
1446 40 : ReleaseSysCache(tuple);
1447 : }
1448 :
1449 : /*
1450 : * renametrig - changes the name of a trigger on a relation
1451 : *
1452 : * trigger name is changed in trigger catalog.
1453 : * No record of the previous name is kept.
1454 : *
1455 : * get proper relrelation from relation catalog (if not arg)
1456 : * scan trigger catalog
1457 : * for name conflict (within rel)
1458 : * for original trigger (if not arg)
1459 : * modify tgname in trigger tuple
1460 : * update row in catalog
1461 : */
ObjectAddress
renametrig(RenameStmt *stmt)
{
	Oid			tgoid;
	Relation	targetrel;
	Relation	tgrel;
	HeapTuple	tuple;
	SysScanDesc tgscan;
	ScanKeyData key[2];
	Oid			relid;
	ObjectAddress address;

	/*
	 * Look up name, check permissions, and acquire lock (which we will NOT
	 * release until end of transaction).
	 */
	relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
									 0,
									 RangeVarCallbackForRenameTrigger,
									 NULL);

	/* Have lock already, so just need to build relcache entry. */
	targetrel = relation_open(relid, NoLock);

	/*
	 * On partitioned tables, this operation recurses to partitions.  Lock all
	 * tables upfront.
	 */
	if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
		(void) find_all_inheritors(relid, AccessExclusiveLock, NULL);

	tgrel = table_open(TriggerRelationId, RowExclusiveLock);

	/*
	 * Search for the trigger to modify, by (tgrelid, old name).
	 */
	ScanKeyInit(&key[0],
				Anum_pg_trigger_tgrelid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(relid));
	ScanKeyInit(&key[1],
				Anum_pg_trigger_tgname,
				BTEqualStrategyNumber, F_NAMEEQ,
				PointerGetDatum(stmt->subname));
	tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
								NULL, 2, key);
	if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
	{
		Form_pg_trigger trigform;

		trigform = (Form_pg_trigger) GETSTRUCT(tuple);
		tgoid = trigform->oid;

		/*
		 * If the trigger descends from a trigger on a parent partitioned
		 * table, reject the rename.  We don't allow a trigger in a partition
		 * to differ in name from that of its parent: that would lead to an
		 * inconsistency that pg_dump would not reproduce.
		 */
		if (OidIsValid(trigform->tgparentid))
			ereport(ERROR,
					errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
					errmsg("cannot rename trigger \"%s\" on table \"%s\"",
						   stmt->subname, RelationGetRelationName(targetrel)),
					errhint("Rename the trigger on the partitioned table \"%s\" instead.",
							get_rel_name(get_partition_parent(relid, false))));


		/* Rename the trigger on this relation ... */
		renametrig_internal(tgrel, targetrel, tuple, stmt->newname,
							stmt->subname);

		/* ... and if it is partitioned, recurse to its partitions */
		if (targetrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
		{
			PartitionDesc partdesc = RelationGetPartitionDesc(targetrel, true);

			for (int i = 0; i < partdesc->nparts; i++)
			{
				Oid			partitionId = partdesc->oids[i];

				renametrig_partition(tgrel, partitionId, trigform->oid,
									 stmt->newname, stmt->subname);
			}
		}
	}
	else
	{
		/* No such trigger on this relation; tgoid never set on this path */
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("trigger \"%s\" for table \"%s\" does not exist",
						stmt->subname, RelationGetRelationName(targetrel))));
	}

	ObjectAddressSet(address, TriggerRelationId, tgoid);

	systable_endscan(tgscan);

	table_close(tgrel, RowExclusiveLock);

	/*
	 * Close rel, but keep exclusive lock!
	 */
	relation_close(targetrel, NoLock);

	return address;
}
1569 :
1570 : /*
1571 : * Subroutine for renametrig -- perform the actual work of renaming one
1572 : * trigger on one table.
1573 : *
1574 : * If the trigger has a name different from the expected one, raise a
1575 : * NOTICE about it.
1576 : */
static void
renametrig_internal(Relation tgrel, Relation targetrel, HeapTuple trigtup,
					const char *newname, const char *expected_name)
{
	HeapTuple	tuple;
	Form_pg_trigger tgform;
	ScanKeyData key[2];
	SysScanDesc tgscan;

	/* If the trigger already has the new name, nothing to do. */
	tgform = (Form_pg_trigger) GETSTRUCT(trigtup);
	if (strcmp(NameStr(tgform->tgname), newname) == 0)
		return;

	/*
	 * Before actually trying the rename, search for triggers with the same
	 * name.  The update would fail with an ugly message in that case, and it
	 * is better to throw a nicer error.
	 */
	ScanKeyInit(&key[0],
				Anum_pg_trigger_tgrelid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(RelationGetRelid(targetrel)));
	ScanKeyInit(&key[1],
				Anum_pg_trigger_tgname,
				BTEqualStrategyNumber, F_NAMEEQ,
				PointerGetDatum(newname));
	tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
								NULL, 2, key);
	if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
		ereport(ERROR,
				(errcode(ERRCODE_DUPLICATE_OBJECT),
				 errmsg("trigger \"%s\" for relation \"%s\" already exists",
						newname, RelationGetRelationName(targetrel))));
	systable_endscan(tgscan);

	/*
	 * The target name is free; update the existing pg_trigger tuple with it.
	 */
	tuple = heap_copytuple(trigtup);	/* need a modifiable copy */
	tgform = (Form_pg_trigger) GETSTRUCT(tuple);

	/*
	 * If the trigger has a name different from what we expected, let the user
	 * know. (We can proceed anyway, since we must have reached here following
	 * a tgparentid link.)
	 */
	if (strcmp(NameStr(tgform->tgname), expected_name) != 0)
		ereport(NOTICE,
				errmsg("renamed trigger \"%s\" on relation \"%s\"",
					   NameStr(tgform->tgname),
					   RelationGetRelationName(targetrel)));

	namestrcpy(&tgform->tgname, newname);

	CatalogTupleUpdate(tgrel, &tuple->t_self, tuple);

	InvokeObjectPostAlterHook(TriggerRelationId, tgform->oid, 0);

	/*
	 * Invalidate relation's relcache entry so that other backends (and this
	 * one too!) are sent SI message to make them rebuild relcache entries.
	 * (Ideally this should happen automatically...)
	 */
	CacheInvalidateRelcache(targetrel);
}
1643 :
1644 : /*
1645 : * Subroutine for renametrig -- Helper for recursing to partitions when
1646 : * renaming triggers on a partitioned table.
1647 : */
1648 : static void
1649 30 : renametrig_partition(Relation tgrel, Oid partitionId, Oid parentTriggerOid,
1650 : const char *newname, const char *expected_name)
1651 : {
1652 : SysScanDesc tgscan;
1653 : ScanKeyData key;
1654 : HeapTuple tuple;
1655 :
1656 : /*
1657 : * Given a relation and the OID of a trigger on parent relation, find the
1658 : * corresponding trigger in the child and rename that trigger to the given
1659 : * name.
1660 : */
1661 30 : ScanKeyInit(&key,
1662 : Anum_pg_trigger_tgrelid,
1663 : BTEqualStrategyNumber, F_OIDEQ,
1664 : ObjectIdGetDatum(partitionId));
1665 30 : tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
1666 : NULL, 1, &key);
1667 48 : while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
1668 : {
1669 42 : Form_pg_trigger tgform = (Form_pg_trigger) GETSTRUCT(tuple);
1670 : Relation partitionRel;
1671 :
1672 42 : if (tgform->tgparentid != parentTriggerOid)
1673 18 : continue; /* not our trigger */
1674 :
1675 24 : partitionRel = table_open(partitionId, NoLock);
1676 :
1677 : /* Rename the trigger on this partition */
1678 24 : renametrig_internal(tgrel, partitionRel, tuple, newname, expected_name);
1679 :
1680 : /* And if this relation is partitioned, recurse to its partitions */
1681 18 : if (partitionRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
1682 : {
1683 6 : PartitionDesc partdesc = RelationGetPartitionDesc(partitionRel,
1684 : true);
1685 :
1686 18 : for (int i = 0; i < partdesc->nparts; i++)
1687 : {
1688 12 : Oid partoid = partdesc->oids[i];
1689 :
1690 12 : renametrig_partition(tgrel, partoid, tgform->oid, newname,
1691 12 : NameStr(tgform->tgname));
1692 : }
1693 : }
1694 18 : table_close(partitionRel, NoLock);
1695 :
1696 : /* There should be at most one matching tuple */
1697 18 : break;
1698 : }
1699 24 : systable_endscan(tgscan);
1700 24 : }
1701 :
/*
 * EnableDisableTrigger()
 *
 * Called by ALTER TABLE ENABLE/DISABLE [ REPLICA | ALWAYS ] TRIGGER
 * to change 'tgenabled' field for the specified trigger(s)
 *
 * rel: relation to process (caller must hold suitable lock on it)
 * tgname: name of trigger to process, or NULL to scan all triggers
 * tgparent: if not zero, process only triggers with this tgparentid
 * fires_when: new value for tgenabled field. In addition to generic
 *			   enablement/disablement, this also defines when the trigger
 *			   should be fired in session replication roles.
 * skip_system: if true, skip "system" triggers (constraint triggers)
 * recurse: if true, recurse to partitions
 *
 * Caller should have checked permissions for the table; here we also
 * enforce that superuser privilege is required to alter the state of
 * system triggers
 */
void
EnableDisableTrigger(Relation rel, const char *tgname, Oid tgparent,
					 char fires_when, bool skip_system, bool recurse,
					 LOCKMODE lockmode)
{
	Relation	tgrel;
	int			nkeys;
	ScanKeyData keys[2];
	SysScanDesc tgscan;
	HeapTuple	tuple;
	bool		found;
	bool		changed;

	/* Scan the relevant entries in pg_triggers */
	tgrel = table_open(TriggerRelationId, RowExclusiveLock);

	/* Always restrict the scan to this relation's triggers ... */
	ScanKeyInit(&keys[0],
				Anum_pg_trigger_tgrelid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(RelationGetRelid(rel)));
	/* ... and add a second key on the trigger name if one was given */
	if (tgname)
	{
		ScanKeyInit(&keys[1],
					Anum_pg_trigger_tgname,
					BTEqualStrategyNumber, F_NAMEEQ,
					CStringGetDatum(tgname));
		nkeys = 2;
	}
	else
		nkeys = 1;

	tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
								NULL, nkeys, keys);

	found = changed = false;

	while (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
	{
		Form_pg_trigger oldtrig = (Form_pg_trigger) GETSTRUCT(tuple);

		/*
		 * When called recursively for a partition, only process clones of
		 * the specified parent trigger; ignore other triggers on the child.
		 */
		if (OidIsValid(tgparent) && tgparent != oldtrig->tgparentid)
			continue;

		if (oldtrig->tgisinternal)
		{
			/* system trigger ... ok to process? */
			if (skip_system)
				continue;
			if (!superuser())
				ereport(ERROR,
						(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
						 errmsg("permission denied: \"%s\" is a system trigger",
								NameStr(oldtrig->tgname))));
		}

		found = true;

		if (oldtrig->tgenabled != fires_when)
		{
			/* need to change this one ... make a copy to scribble on */
			HeapTuple	newtup = heap_copytuple(tuple);
			Form_pg_trigger newtrig = (Form_pg_trigger) GETSTRUCT(newtup);

			newtrig->tgenabled = fires_when;

			CatalogTupleUpdate(tgrel, &newtup->t_self, newtup);

			heap_freetuple(newtup);

			changed = true;
		}

		/*
		 * When altering FOR EACH ROW triggers on a partitioned table, do the
		 * same on the partitions as well, unless ONLY is specified.
		 *
		 * Note that we recurse even if we didn't change the trigger above,
		 * because the partitions' copy of the trigger may have a different
		 * value of tgenabled than the parent's trigger and thus might need to
		 * be changed.
		 */
		if (recurse &&
			rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE &&
			(TRIGGER_FOR_ROW(oldtrig->tgtype)))
		{
			PartitionDesc partdesc = RelationGetPartitionDesc(rel, true);
			int			i;

			for (i = 0; i < partdesc->nparts; i++)
			{
				Relation	part;

				part = relation_open(partdesc->oids[i], lockmode);
				/* Match on child triggers' tgparentid, not their name */
				EnableDisableTrigger(part, NULL, oldtrig->oid,
									 fires_when, skip_system, recurse,
									 lockmode);
				table_close(part, NoLock);	/* keep lock till commit */
			}
		}

		InvokeObjectPostAlterHook(TriggerRelationId,
								  oldtrig->oid, 0);
	}

	systable_endscan(tgscan);

	table_close(tgrel, RowExclusiveLock);

	/* An explicitly-named trigger that wasn't found is an error */
	if (tgname && !found)
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("trigger \"%s\" for table \"%s\" does not exist",
						tgname, RelationGetRelationName(rel))));

	/*
	 * If we changed anything, broadcast a SI inval message to force each
	 * backend (including our own!) to rebuild relation's relcache entry.
	 * Otherwise they will fail to apply the change promptly.
	 */
	if (changed)
		CacheInvalidateRelcache(rel);
}
1844 :
1845 :
/*
 * Build trigger data to attach to the given relcache entry.
 *
 * Note that trigger data attached to a relcache entry must be stored in
 * CacheMemoryContext to ensure it survives as long as the relcache entry.
 * But we should be running in a less long-lived working context.  To avoid
 * leaking cache memory if this routine fails partway through, we build a
 * temporary TriggerDesc in working memory and then copy the completed
 * structure into cache memory.
 */
void
RelationBuildTriggers(Relation relation)
{
	TriggerDesc *trigdesc;
	int			numtrigs;
	int			maxtrigs;
	Trigger    *triggers;
	Relation	tgrel;
	ScanKeyData skey;
	SysScanDesc tgscan;
	HeapTuple	htup;
	MemoryContext oldContext;
	int			i;

	/*
	 * Allocate a working array to hold the triggers (the array is extended if
	 * necessary)
	 */
	maxtrigs = 16;
	triggers = (Trigger *) palloc(maxtrigs * sizeof(Trigger));
	numtrigs = 0;

	/*
	 * Note: since we scan the triggers using TriggerRelidNameIndexId, we will
	 * be reading the triggers in name order, except possibly during
	 * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
	 * ensures that triggers will be fired in name order.
	 */
	ScanKeyInit(&skey,
				Anum_pg_trigger_tgrelid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(RelationGetRelid(relation)));

	tgrel = table_open(TriggerRelationId, AccessShareLock);
	tgscan = systable_beginscan(tgrel, TriggerRelidNameIndexId, true,
								NULL, 1, &skey);

	while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
	{
		Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
		Trigger    *build;
		Datum		datum;
		bool		isnull;

		/* Double the working array whenever it fills up */
		if (numtrigs >= maxtrigs)
		{
			maxtrigs *= 2;
			triggers = (Trigger *) repalloc(triggers, maxtrigs * sizeof(Trigger));
		}
		build = &(triggers[numtrigs]);

		/* Copy the fixed-width catalog fields into the Trigger struct */
		build->tgoid = pg_trigger->oid;
		build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
															NameGetDatum(&pg_trigger->tgname)));
		build->tgfoid = pg_trigger->tgfoid;
		build->tgtype = pg_trigger->tgtype;
		build->tgenabled = pg_trigger->tgenabled;
		build->tgisinternal = pg_trigger->tgisinternal;
		/* a trigger is a clone iff it has a parent trigger */
		build->tgisclone = OidIsValid(pg_trigger->tgparentid);
		build->tgconstrrelid = pg_trigger->tgconstrrelid;
		build->tgconstrindid = pg_trigger->tgconstrindid;
		build->tgconstraint = pg_trigger->tgconstraint;
		build->tgdeferrable = pg_trigger->tgdeferrable;
		build->tginitdeferred = pg_trigger->tginitdeferred;
		build->tgnargs = pg_trigger->tgnargs;
		/* tgattr is first var-width field, so OK to access directly */
		build->tgnattr = pg_trigger->tgattr.dim1;
		if (build->tgnattr > 0)
		{
			build->tgattr = (int16 *) palloc(build->tgnattr * sizeof(int16));
			memcpy(build->tgattr, &(pg_trigger->tgattr.values),
				   build->tgnattr * sizeof(int16));
		}
		else
			build->tgattr = NULL;
		if (build->tgnargs > 0)
		{
			bytea	   *val;
			char	   *p;

			/*
			 * tgargs is stored as a bytea holding tgnargs NUL-terminated
			 * strings laid end to end; unpack them into a char * array.
			 */
			val = DatumGetByteaPP(fastgetattr(htup,
											  Anum_pg_trigger_tgargs,
											  tgrel->rd_att, &isnull));
			if (isnull)
				elog(ERROR, "tgargs is null in trigger for relation \"%s\"",
					 RelationGetRelationName(relation));
			p = (char *) VARDATA_ANY(val);
			build->tgargs = (char **) palloc(build->tgnargs * sizeof(char *));
			for (i = 0; i < build->tgnargs; i++)
			{
				build->tgargs[i] = pstrdup(p);
				p += strlen(p) + 1;
			}
		}
		else
			build->tgargs = NULL;

		/* Transition table names and WHEN clause are nullable attributes */
		datum = fastgetattr(htup, Anum_pg_trigger_tgoldtable,
							tgrel->rd_att, &isnull);
		if (!isnull)
			build->tgoldtable =
				DatumGetCString(DirectFunctionCall1(nameout, datum));
		else
			build->tgoldtable = NULL;

		datum = fastgetattr(htup, Anum_pg_trigger_tgnewtable,
							tgrel->rd_att, &isnull);
		if (!isnull)
			build->tgnewtable =
				DatumGetCString(DirectFunctionCall1(nameout, datum));
		else
			build->tgnewtable = NULL;

		datum = fastgetattr(htup, Anum_pg_trigger_tgqual,
							tgrel->rd_att, &isnull);
		if (!isnull)
			build->tgqual = TextDatumGetCString(datum);
		else
			build->tgqual = NULL;

		numtrigs++;
	}

	systable_endscan(tgscan);
	table_close(tgrel, AccessShareLock);

	/* There might not be any triggers */
	if (numtrigs == 0)
	{
		pfree(triggers);
		return;
	}

	/* Build trigdesc */
	trigdesc = (TriggerDesc *) palloc0(sizeof(TriggerDesc));
	trigdesc->triggers = triggers;
	trigdesc->numtriggers = numtrigs;
	for (i = 0; i < numtrigs; i++)
		SetTriggerFlags(trigdesc, &(triggers[i]));

	/* Copy completed trigdesc into cache storage */
	oldContext = MemoryContextSwitchTo(CacheMemoryContext);
	relation->trigdesc = CopyTriggerDesc(trigdesc);
	MemoryContextSwitchTo(oldContext);

	/* Release working memory */
	FreeTriggerDesc(trigdesc);
}
2004 :
2005 : /*
2006 : * Update the TriggerDesc's hint flags to include the specified trigger
2007 : */
2008 : static void
2009 98662 : SetTriggerFlags(TriggerDesc *trigdesc, Trigger *trigger)
2010 : {
2011 98662 : int16 tgtype = trigger->tgtype;
2012 :
2013 98662 : trigdesc->trig_insert_before_row |=
2014 98662 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2015 : TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2016 98662 : trigdesc->trig_insert_after_row |=
2017 98662 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2018 : TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2019 98662 : trigdesc->trig_insert_instead_row |=
2020 98662 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2021 : TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_INSERT);
2022 98662 : trigdesc->trig_insert_before_statement |=
2023 98662 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2024 : TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_INSERT);
2025 98662 : trigdesc->trig_insert_after_statement |=
2026 98662 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2027 : TRIGGER_TYPE_AFTER, TRIGGER_TYPE_INSERT);
2028 98662 : trigdesc->trig_update_before_row |=
2029 98662 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2030 : TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2031 98662 : trigdesc->trig_update_after_row |=
2032 98662 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2033 : TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2034 98662 : trigdesc->trig_update_instead_row |=
2035 98662 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2036 : TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_UPDATE);
2037 98662 : trigdesc->trig_update_before_statement |=
2038 98662 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2039 : TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_UPDATE);
2040 98662 : trigdesc->trig_update_after_statement |=
2041 98662 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2042 : TRIGGER_TYPE_AFTER, TRIGGER_TYPE_UPDATE);
2043 98662 : trigdesc->trig_delete_before_row |=
2044 98662 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2045 : TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2046 98662 : trigdesc->trig_delete_after_row |=
2047 98662 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2048 : TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2049 98662 : trigdesc->trig_delete_instead_row |=
2050 98662 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_ROW,
2051 : TRIGGER_TYPE_INSTEAD, TRIGGER_TYPE_DELETE);
2052 98662 : trigdesc->trig_delete_before_statement |=
2053 98662 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2054 : TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_DELETE);
2055 98662 : trigdesc->trig_delete_after_statement |=
2056 98662 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2057 : TRIGGER_TYPE_AFTER, TRIGGER_TYPE_DELETE);
2058 : /* there are no row-level truncate triggers */
2059 98662 : trigdesc->trig_truncate_before_statement |=
2060 98662 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2061 : TRIGGER_TYPE_BEFORE, TRIGGER_TYPE_TRUNCATE);
2062 98662 : trigdesc->trig_truncate_after_statement |=
2063 98662 : TRIGGER_TYPE_MATCHES(tgtype, TRIGGER_TYPE_STATEMENT,
2064 : TRIGGER_TYPE_AFTER, TRIGGER_TYPE_TRUNCATE);
2065 :
2066 197324 : trigdesc->trig_insert_new_table |=
2067 132344 : (TRIGGER_FOR_INSERT(tgtype) &&
2068 33682 : TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2069 197324 : trigdesc->trig_update_old_table |=
2070 143346 : (TRIGGER_FOR_UPDATE(tgtype) &&
2071 44684 : TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2072 197324 : trigdesc->trig_update_new_table |=
2073 143346 : (TRIGGER_FOR_UPDATE(tgtype) &&
2074 44684 : TRIGGER_USES_TRANSITION_TABLE(trigger->tgnewtable));
2075 197324 : trigdesc->trig_delete_old_table |=
2076 125412 : (TRIGGER_FOR_DELETE(tgtype) &&
2077 26750 : TRIGGER_USES_TRANSITION_TABLE(trigger->tgoldtable));
2078 98662 : }
2079 :
2080 : /*
2081 : * Copy a TriggerDesc data structure.
2082 : *
2083 : * The copy is allocated in the current memory context.
2084 : */
2085 : TriggerDesc *
2086 480980 : CopyTriggerDesc(TriggerDesc *trigdesc)
2087 : {
2088 : TriggerDesc *newdesc;
2089 : Trigger *trigger;
2090 : int i;
2091 :
2092 480980 : if (trigdesc == NULL || trigdesc->numtriggers <= 0)
2093 418924 : return NULL;
2094 :
2095 62056 : newdesc = (TriggerDesc *) palloc(sizeof(TriggerDesc));
2096 62056 : memcpy(newdesc, trigdesc, sizeof(TriggerDesc));
2097 :
2098 62056 : trigger = (Trigger *) palloc(trigdesc->numtriggers * sizeof(Trigger));
2099 62056 : memcpy(trigger, trigdesc->triggers,
2100 62056 : trigdesc->numtriggers * sizeof(Trigger));
2101 62056 : newdesc->triggers = trigger;
2102 :
2103 209468 : for (i = 0; i < trigdesc->numtriggers; i++)
2104 : {
2105 147412 : trigger->tgname = pstrdup(trigger->tgname);
2106 147412 : if (trigger->tgnattr > 0)
2107 : {
2108 : int16 *newattr;
2109 :
2110 1004 : newattr = (int16 *) palloc(trigger->tgnattr * sizeof(int16));
2111 1004 : memcpy(newattr, trigger->tgattr,
2112 1004 : trigger->tgnattr * sizeof(int16));
2113 1004 : trigger->tgattr = newattr;
2114 : }
2115 147412 : if (trigger->tgnargs > 0)
2116 : {
2117 : char **newargs;
2118 : int16 j;
2119 :
2120 9304 : newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
2121 20868 : for (j = 0; j < trigger->tgnargs; j++)
2122 11564 : newargs[j] = pstrdup(trigger->tgargs[j]);
2123 9304 : trigger->tgargs = newargs;
2124 : }
2125 147412 : if (trigger->tgqual)
2126 1220 : trigger->tgqual = pstrdup(trigger->tgqual);
2127 147412 : if (trigger->tgoldtable)
2128 1898 : trigger->tgoldtable = pstrdup(trigger->tgoldtable);
2129 147412 : if (trigger->tgnewtable)
2130 2244 : trigger->tgnewtable = pstrdup(trigger->tgnewtable);
2131 147412 : trigger++;
2132 : }
2133 :
2134 62056 : return newdesc;
2135 : }
2136 :
2137 : /*
2138 : * Free a TriggerDesc data structure.
2139 : */
2140 : void
2141 1178620 : FreeTriggerDesc(TriggerDesc *trigdesc)
2142 : {
2143 : Trigger *trigger;
2144 : int i;
2145 :
2146 1178620 : if (trigdesc == NULL)
2147 1093662 : return;
2148 :
2149 84958 : trigger = trigdesc->triggers;
2150 270584 : for (i = 0; i < trigdesc->numtriggers; i++)
2151 : {
2152 185626 : pfree(trigger->tgname);
2153 185626 : if (trigger->tgnattr > 0)
2154 982 : pfree(trigger->tgattr);
2155 185626 : if (trigger->tgnargs > 0)
2156 : {
2157 11986 : while (--(trigger->tgnargs) >= 0)
2158 6676 : pfree(trigger->tgargs[trigger->tgnargs]);
2159 5310 : pfree(trigger->tgargs);
2160 : }
2161 185626 : if (trigger->tgqual)
2162 1384 : pfree(trigger->tgqual);
2163 185626 : if (trigger->tgoldtable)
2164 1460 : pfree(trigger->tgoldtable);
2165 185626 : if (trigger->tgnewtable)
2166 2040 : pfree(trigger->tgnewtable);
2167 185626 : trigger++;
2168 : }
2169 84958 : pfree(trigdesc->triggers);
2170 84958 : pfree(trigdesc);
2171 : }
2172 :
/*
 * Compare two TriggerDesc structures for logical equality.
 */
#ifdef NOT_USED
bool
equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
{
	int			i,
				j;

	/* Equality test for two possibly-NULL C strings */
#define NULLABLE_STR_EQ(a, b) \
	((a) == NULL ? (b) == NULL : ((b) != NULL && strcmp((a), (b)) == 0))

	/*
	 * We need not examine the hint flags, just the trigger array itself; if
	 * we have the same triggers with the same types, the flags should match.
	 *
	 * As of 7.3 we assume trigger set ordering is significant in the
	 * comparison; so we just compare corresponding slots of the two sets.
	 *
	 * Note: comparing the stringToNode forms of the WHEN clauses means that
	 * parse column locations will affect the result.  This is okay as long as
	 * this function is only used for detecting exact equality, as for example
	 * in checking for staleness of a cache entry.
	 */
	if (trigdesc1 == NULL)
		return trigdesc2 == NULL;
	if (trigdesc2 == NULL)
		return false;
	if (trigdesc1->numtriggers != trigdesc2->numtriggers)
		return false;

	for (i = 0; i < trigdesc1->numtriggers; i++)
	{
		Trigger    *trig1 = trigdesc1->triggers + i;
		Trigger    *trig2 = trigdesc2->triggers + i;

		/* Fixed-width fields and the trigger name */
		if (trig1->tgoid != trig2->tgoid ||
			strcmp(trig1->tgname, trig2->tgname) != 0 ||
			trig1->tgfoid != trig2->tgfoid ||
			trig1->tgtype != trig2->tgtype ||
			trig1->tgenabled != trig2->tgenabled ||
			trig1->tgisinternal != trig2->tgisinternal ||
			trig1->tgisclone != trig2->tgisclone ||
			trig1->tgconstrrelid != trig2->tgconstrrelid ||
			trig1->tgconstrindid != trig2->tgconstrindid ||
			trig1->tgconstraint != trig2->tgconstraint ||
			trig1->tgdeferrable != trig2->tgdeferrable ||
			trig1->tginitdeferred != trig2->tginitdeferred ||
			trig1->tgnargs != trig2->tgnargs ||
			trig1->tgnattr != trig2->tgnattr)
			return false;

		/* Column list and argument strings */
		if (trig1->tgnattr > 0 &&
			memcmp(trig1->tgattr, trig2->tgattr,
				   trig1->tgnattr * sizeof(int16)) != 0)
			return false;
		for (j = 0; j < trig1->tgnargs; j++)
			if (strcmp(trig1->tgargs[j], trig2->tgargs[j]) != 0)
				return false;

		/* Nullable string fields */
		if (!NULLABLE_STR_EQ(trig1->tgqual, trig2->tgqual))
			return false;
		if (!NULLABLE_STR_EQ(trig1->tgoldtable, trig2->tgoldtable))
			return false;
		if (!NULLABLE_STR_EQ(trig1->tgnewtable, trig2->tgnewtable))
			return false;
	}

#undef NULLABLE_STR_EQ

	return true;
}
#endif							/* NOT_USED */
2266 :
2267 : /*
2268 : * Check if there is a row-level trigger with transition tables that prevents
2269 : * a table from becoming an inheritance child or partition. Return the name
2270 : * of the first such incompatible trigger, or NULL if there is none.
2271 : */
2272 : const char *
2273 2376 : FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
2274 : {
2275 2376 : if (trigdesc != NULL)
2276 : {
2277 : int i;
2278 :
2279 390 : for (i = 0; i < trigdesc->numtriggers; ++i)
2280 : {
2281 282 : Trigger *trigger = &trigdesc->triggers[i];
2282 :
2283 282 : if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
2284 12 : return trigger->tgname;
2285 : }
2286 : }
2287 :
2288 2364 : return NULL;
2289 : }
2290 :
/*
 * Call a trigger function.
 *
 * trigdata: trigger descriptor.
 * tgindx: trigger's index in finfo and instr arrays.
 * finfo: array of cached trigger function call information.
 * instr: optional array of EXPLAIN ANALYZE instrumentation state.
 * per_tuple_context: memory context to execute the function in.
 *
 * Returns the tuple (or NULL) as returned by the function.
 */
static HeapTuple
ExecCallTriggerFunc(TriggerData *trigdata,
					int tgindx,
					FmgrInfo *finfo,
					Instrumentation *instr,
					MemoryContext per_tuple_context)
{
	LOCAL_FCINFO(fcinfo, 0);
	PgStat_FunctionCallUsage fcusage;
	Datum		result;
	MemoryContext oldContext;

	/*
	 * Protect against code paths that may fail to initialize transition table
	 * info.
	 */
	Assert(((TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) ||
			 TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event) ||
			 TRIGGER_FIRED_BY_DELETE(trigdata->tg_event)) &&
			TRIGGER_FIRED_AFTER(trigdata->tg_event) &&
			!(trigdata->tg_event & AFTER_TRIGGER_DEFERRABLE) &&
			!(trigdata->tg_event & AFTER_TRIGGER_INITDEFERRED)) ||
		   (trigdata->tg_oldtable == NULL && trigdata->tg_newtable == NULL));

	/* Index into the caller-provided per-trigger cache slot */
	finfo += tgindx;

	/*
	 * We cache fmgr lookup info, to avoid making the lookup again on each
	 * call.
	 */
	if (finfo->fn_oid == InvalidOid)
		fmgr_info(trigdata->tg_trigger->tgfoid, finfo);

	Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);

	/*
	 * If doing EXPLAIN ANALYZE, start charging time to this trigger.
	 */
	if (instr)
		InstrStartNode(instr + tgindx);

	/*
	 * Do the function evaluation in the per-tuple memory context, so that
	 * leaked memory will be reclaimed once per tuple. Note in particular that
	 * any new tuple created by the trigger function will live till the end of
	 * the tuple cycle.
	 */
	oldContext = MemoryContextSwitchTo(per_tuple_context);

	/*
	 * Call the function, passing no arguments but setting a context.
	 */
	InitFunctionCallInfoData(*fcinfo, finfo, 0,
							 InvalidOid, (Node *) trigdata, NULL);

	pgstat_init_function_usage(fcinfo, &fcusage);

	/*
	 * Bracket the call with MyTriggerDepth increment/decrement; the
	 * PG_FINALLY ensures the depth counter is restored even if the trigger
	 * function raises an error.
	 */
	MyTriggerDepth++;
	PG_TRY();
	{
		result = FunctionCallInvoke(fcinfo);
	}
	PG_FINALLY();
	{
		MyTriggerDepth--;
	}
	PG_END_TRY();

	pgstat_end_function_usage(&fcusage, true);

	MemoryContextSwitchTo(oldContext);

	/*
	 * Trigger protocol allows function to return a null pointer, but NOT to
	 * set the isnull result flag.
	 */
	if (fcinfo->isnull)
		ereport(ERROR,
				(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
				 errmsg("trigger function %u returned null value",
						fcinfo->flinfo->fn_oid)));

	/*
	 * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
	 * one "tuple returned" (really the number of firings).
	 */
	if (instr)
		InstrStopNode(instr + tgindx, 1);

	return (HeapTuple) DatumGetPointer(result);
}
2393 :
2394 : void
2395 94246 : ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
2396 : {
2397 : TriggerDesc *trigdesc;
2398 : int i;
2399 94246 : TriggerData LocTriggerData = {0};
2400 :
2401 94246 : trigdesc = relinfo->ri_TrigDesc;
2402 :
2403 94246 : if (trigdesc == NULL)
2404 94034 : return;
2405 7228 : if (!trigdesc->trig_insert_before_statement)
2406 7016 : return;
2407 :
2408 : /* no-op if we already fired BS triggers in this context */
2409 212 : if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2410 : CMD_INSERT))
2411 0 : return;
2412 :
2413 212 : LocTriggerData.type = T_TriggerData;
2414 212 : LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
2415 : TRIGGER_EVENT_BEFORE;
2416 212 : LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2417 1832 : for (i = 0; i < trigdesc->numtriggers; i++)
2418 : {
2419 1632 : Trigger *trigger = &trigdesc->triggers[i];
2420 : HeapTuple newtuple;
2421 :
2422 1632 : if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2423 : TRIGGER_TYPE_STATEMENT,
2424 : TRIGGER_TYPE_BEFORE,
2425 : TRIGGER_TYPE_INSERT))
2426 1408 : continue;
2427 224 : if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2428 : NULL, NULL, NULL))
2429 30 : continue;
2430 :
2431 194 : LocTriggerData.tg_trigger = trigger;
2432 194 : newtuple = ExecCallTriggerFunc(&LocTriggerData,
2433 : i,
2434 : relinfo->ri_TrigFunctions,
2435 : relinfo->ri_TrigInstrument,
2436 194 : GetPerTupleMemoryContext(estate));
2437 :
2438 182 : if (newtuple)
2439 0 : ereport(ERROR,
2440 : (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2441 : errmsg("BEFORE STATEMENT trigger cannot return a value")));
2442 : }
2443 : }
2444 :
2445 : void
2446 91780 : ExecASInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2447 : TransitionCaptureState *transition_capture)
2448 : {
2449 91780 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2450 :
2451 91780 : if (trigdesc && trigdesc->trig_insert_after_statement)
2452 424 : AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2453 : TRIGGER_EVENT_INSERT,
2454 : false, NULL, NULL, NIL, NULL, transition_capture,
2455 : false);
2456 91780 : }
2457 :
/*
 * Execute BEFORE ROW INSERT triggers.
 *
 * Returns true if the insert should proceed (possibly with a modified tuple
 * stored back into 'slot'), or false if some trigger returned NULL,
 * i.e. requested that the insert be suppressed ("do nothing").
 */
bool
ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
					 TupleTableSlot *slot)
{
	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
	HeapTuple	newtuple = NULL;	/* materialized tuple, fetched lazily */
	bool		should_free;	/* set when newtuple is first fetched */
	TriggerData LocTriggerData = {0};
	int			i;

	LocTriggerData.type = T_TriggerData;
	LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
		TRIGGER_EVENT_ROW |
		TRIGGER_EVENT_BEFORE;
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
	for (i = 0; i < trigdesc->numtriggers; i++)
	{
		Trigger    *trigger = &trigdesc->triggers[i];
		HeapTuple	oldtuple;

		/* Consider only enabled BEFORE ROW INSERT triggers */
		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
								  TRIGGER_TYPE_ROW,
								  TRIGGER_TYPE_BEFORE,
								  TRIGGER_TYPE_INSERT))
			continue;
		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
							NULL, NULL, slot))
			continue;

		/* Materialize the slot's tuple only once, on first use */
		if (!newtuple)
			newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);

		LocTriggerData.tg_trigslot = slot;
		LocTriggerData.tg_trigtuple = oldtuple = newtuple;
		LocTriggerData.tg_trigger = trigger;
		newtuple = ExecCallTriggerFunc(&LocTriggerData,
									   i,
									   relinfo->ri_TrigFunctions,
									   relinfo->ri_TrigInstrument,
									   GetPerTupleMemoryContext(estate));
		if (newtuple == NULL)
		{
			/* trigger asked to suppress the insert */
			if (should_free)
				heap_freetuple(oldtuple);
			return false;		/* "do nothing" */
		}
		else if (newtuple != oldtuple)
		{
			/* trigger replaced the tuple: store it back into the slot */
			ExecForceStoreHeapTuple(newtuple, slot, false);

			/*
			 * After a tuple in a partition goes through a trigger, the user
			 * could have changed the partition key enough that the tuple no
			 * longer fits the partition. Verify that.
			 */
			if (trigger->tgisclone &&
				!ExecPartitionCheck(relinfo, slot, estate, false))
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("moving row to another partition during a BEFORE FOR EACH ROW trigger is not supported"),
						 errdetail("Before executing trigger \"%s\", the row was to be in partition \"%s.%s\".",
								   trigger->tgname,
								   get_namespace_name(RelationGetNamespace(relinfo->ri_RelationDesc)),
								   RelationGetRelationName(relinfo->ri_RelationDesc))));

			if (should_free)
				heap_freetuple(oldtuple);

			/* signal tuple should be re-fetched if used */
			newtuple = NULL;
		}
	}

	return true;
}
2533 :
2534 : void
2535 11805824 : ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
2536 : TupleTableSlot *slot, List *recheckIndexes,
2537 : TransitionCaptureState *transition_capture)
2538 : {
2539 11805824 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2540 :
2541 11805824 : if ((trigdesc && trigdesc->trig_insert_after_row) ||
2542 60300 : (transition_capture && transition_capture->tcs_insert_new_table))
2543 65514 : AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2544 : TRIGGER_EVENT_INSERT,
2545 : true, NULL, slot,
2546 : recheckIndexes, NULL,
2547 : transition_capture,
2548 : false);
2549 11805824 : }
2550 :
/*
 * Execute INSTEAD OF ROW INSERT triggers.
 *
 * Returns true if processing should continue (the slot may hold a tuple
 * modified by a trigger), or false if some trigger returned NULL,
 * i.e. requested "do nothing".
 */
bool
ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
					 TupleTableSlot *slot)
{
	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
	HeapTuple	newtuple = NULL;	/* materialized tuple, fetched lazily */
	bool		should_free;	/* set when newtuple is first fetched */
	TriggerData LocTriggerData = {0};
	int			i;

	LocTriggerData.type = T_TriggerData;
	LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
		TRIGGER_EVENT_ROW |
		TRIGGER_EVENT_INSTEAD;
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
	for (i = 0; i < trigdesc->numtriggers; i++)
	{
		Trigger    *trigger = &trigdesc->triggers[i];
		HeapTuple	oldtuple;

		/* Consider only enabled INSTEAD OF ROW INSERT triggers */
		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
								  TRIGGER_TYPE_ROW,
								  TRIGGER_TYPE_INSTEAD,
								  TRIGGER_TYPE_INSERT))
			continue;
		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
							NULL, NULL, slot))
			continue;

		/* Materialize the slot's tuple only once, on first use */
		if (!newtuple)
			newtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);

		LocTriggerData.tg_trigslot = slot;
		LocTriggerData.tg_trigtuple = oldtuple = newtuple;
		LocTriggerData.tg_trigger = trigger;
		newtuple = ExecCallTriggerFunc(&LocTriggerData,
									   i,
									   relinfo->ri_TrigFunctions,
									   relinfo->ri_TrigInstrument,
									   GetPerTupleMemoryContext(estate));
		if (newtuple == NULL)
		{
			/* trigger asked to suppress the operation */
			if (should_free)
				heap_freetuple(oldtuple);
			return false;		/* "do nothing" */
		}
		else if (newtuple != oldtuple)
		{
			/* trigger replaced the tuple: store it back into the slot */
			ExecForceStoreHeapTuple(newtuple, slot, false);

			if (should_free)
				heap_freetuple(oldtuple);

			/* signal tuple should be re-fetched if used */
			newtuple = NULL;
		}
	}

	return true;
}
2611 :
2612 : void
2613 12456 : ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
2614 : {
2615 : TriggerDesc *trigdesc;
2616 : int i;
2617 12456 : TriggerData LocTriggerData = {0};
2618 :
2619 12456 : trigdesc = relinfo->ri_TrigDesc;
2620 :
2621 12456 : if (trigdesc == NULL)
2622 12378 : return;
2623 1638 : if (!trigdesc->trig_delete_before_statement)
2624 1518 : return;
2625 :
2626 : /* no-op if we already fired BS triggers in this context */
2627 120 : if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2628 : CMD_DELETE))
2629 42 : return;
2630 :
2631 78 : LocTriggerData.type = T_TriggerData;
2632 78 : LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2633 : TRIGGER_EVENT_BEFORE;
2634 78 : LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2635 708 : for (i = 0; i < trigdesc->numtriggers; i++)
2636 : {
2637 630 : Trigger *trigger = &trigdesc->triggers[i];
2638 : HeapTuple newtuple;
2639 :
2640 630 : if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2641 : TRIGGER_TYPE_STATEMENT,
2642 : TRIGGER_TYPE_BEFORE,
2643 : TRIGGER_TYPE_DELETE))
2644 552 : continue;
2645 78 : if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2646 : NULL, NULL, NULL))
2647 12 : continue;
2648 :
2649 66 : LocTriggerData.tg_trigger = trigger;
2650 66 : newtuple = ExecCallTriggerFunc(&LocTriggerData,
2651 : i,
2652 : relinfo->ri_TrigFunctions,
2653 : relinfo->ri_TrigInstrument,
2654 66 : GetPerTupleMemoryContext(estate));
2655 :
2656 66 : if (newtuple)
2657 0 : ereport(ERROR,
2658 : (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2659 : errmsg("BEFORE STATEMENT trigger cannot return a value")));
2660 : }
2661 : }
2662 :
2663 : void
2664 12296 : ExecASDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2665 : TransitionCaptureState *transition_capture)
2666 : {
2667 12296 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2668 :
2669 12296 : if (trigdesc && trigdesc->trig_delete_after_statement)
2670 230 : AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2671 : TRIGGER_EVENT_DELETE,
2672 : false, NULL, NULL, NIL, NULL, transition_capture,
2673 : false);
2674 12296 : }
2675 :
2676 : /*
2677 : * Execute BEFORE ROW DELETE triggers.
2678 : *
2679 : * True indicates caller can proceed with the delete. False indicates caller
2680 : * need to suppress the delete and additionally if requested, we need to pass
2681 : * back the concurrently updated tuple if any.
2682 : */
2683 : bool
2684 388 : ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
2685 : ResultRelInfo *relinfo,
2686 : ItemPointer tupleid,
2687 : HeapTuple fdw_trigtuple,
2688 : TupleTableSlot **epqslot,
2689 : TM_Result *tmresult,
2690 : TM_FailureData *tmfd)
2691 : {
2692 388 : TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2693 388 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2694 388 : bool result = true;
2695 388 : TriggerData LocTriggerData = {0};
2696 : HeapTuple trigtuple;
2697 388 : bool should_free = false;
2698 : int i;
2699 :
2700 : Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2701 388 : if (fdw_trigtuple == NULL)
2702 : {
2703 372 : TupleTableSlot *epqslot_candidate = NULL;
2704 :
2705 372 : if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
2706 : LockTupleExclusive, slot, &epqslot_candidate,
2707 : tmresult, tmfd))
2708 12 : return false;
2709 :
2710 : /*
2711 : * If the tuple was concurrently updated and the caller of this
2712 : * function requested for the updated tuple, skip the trigger
2713 : * execution.
2714 : */
2715 356 : if (epqslot_candidate != NULL && epqslot != NULL)
2716 : {
2717 2 : *epqslot = epqslot_candidate;
2718 2 : return false;
2719 : }
2720 :
2721 354 : trigtuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
2722 : }
2723 : else
2724 : {
2725 16 : trigtuple = fdw_trigtuple;
2726 16 : ExecForceStoreHeapTuple(trigtuple, slot, false);
2727 : }
2728 :
2729 370 : LocTriggerData.type = T_TriggerData;
2730 370 : LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2731 : TRIGGER_EVENT_ROW |
2732 : TRIGGER_EVENT_BEFORE;
2733 370 : LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2734 1300 : for (i = 0; i < trigdesc->numtriggers; i++)
2735 : {
2736 : HeapTuple newtuple;
2737 1010 : Trigger *trigger = &trigdesc->triggers[i];
2738 :
2739 1010 : if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2740 : TRIGGER_TYPE_ROW,
2741 : TRIGGER_TYPE_BEFORE,
2742 : TRIGGER_TYPE_DELETE))
2743 634 : continue;
2744 376 : if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2745 : NULL, slot, NULL))
2746 14 : continue;
2747 :
2748 362 : LocTriggerData.tg_trigslot = slot;
2749 362 : LocTriggerData.tg_trigtuple = trigtuple;
2750 362 : LocTriggerData.tg_trigger = trigger;
2751 362 : newtuple = ExecCallTriggerFunc(&LocTriggerData,
2752 : i,
2753 : relinfo->ri_TrigFunctions,
2754 : relinfo->ri_TrigInstrument,
2755 362 : GetPerTupleMemoryContext(estate));
2756 334 : if (newtuple == NULL)
2757 : {
2758 52 : result = false; /* tell caller to suppress delete */
2759 52 : break;
2760 : }
2761 282 : if (newtuple != trigtuple)
2762 56 : heap_freetuple(newtuple);
2763 : }
2764 342 : if (should_free)
2765 0 : heap_freetuple(trigtuple);
2766 :
2767 342 : return result;
2768 : }
2769 :
2770 : /*
2771 : * Note: is_crosspart_update must be true if the DELETE is being performed
2772 : * as part of a cross-partition update.
2773 : */
2774 : void
2775 1720210 : ExecARDeleteTriggers(EState *estate,
2776 : ResultRelInfo *relinfo,
2777 : ItemPointer tupleid,
2778 : HeapTuple fdw_trigtuple,
2779 : TransitionCaptureState *transition_capture,
2780 : bool is_crosspart_update)
2781 : {
2782 1720210 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2783 :
2784 1720210 : if ((trigdesc && trigdesc->trig_delete_after_row) ||
2785 4998 : (transition_capture && transition_capture->tcs_delete_old_table))
2786 : {
2787 6212 : TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2788 :
2789 : Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
2790 6212 : if (fdw_trigtuple == NULL)
2791 6196 : GetTupleForTrigger(estate,
2792 : NULL,
2793 : relinfo,
2794 : tupleid,
2795 : LockTupleExclusive,
2796 : slot,
2797 : NULL,
2798 : NULL,
2799 : NULL);
2800 : else
2801 16 : ExecForceStoreHeapTuple(fdw_trigtuple, slot, false);
2802 :
2803 6212 : AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2804 : TRIGGER_EVENT_DELETE,
2805 : true, slot, NULL, NIL, NULL,
2806 : transition_capture,
2807 : is_crosspart_update);
2808 : }
2809 1720210 : }
2810 :
2811 : bool
2812 60 : ExecIRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
2813 : HeapTuple trigtuple)
2814 : {
2815 60 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2816 60 : TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
2817 60 : TriggerData LocTriggerData = {0};
2818 : int i;
2819 :
2820 60 : LocTriggerData.type = T_TriggerData;
2821 60 : LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
2822 : TRIGGER_EVENT_ROW |
2823 : TRIGGER_EVENT_INSTEAD;
2824 60 : LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2825 :
2826 60 : ExecForceStoreHeapTuple(trigtuple, slot, false);
2827 :
2828 354 : for (i = 0; i < trigdesc->numtriggers; i++)
2829 : {
2830 : HeapTuple rettuple;
2831 300 : Trigger *trigger = &trigdesc->triggers[i];
2832 :
2833 300 : if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2834 : TRIGGER_TYPE_ROW,
2835 : TRIGGER_TYPE_INSTEAD,
2836 : TRIGGER_TYPE_DELETE))
2837 240 : continue;
2838 60 : if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2839 : NULL, slot, NULL))
2840 0 : continue;
2841 :
2842 60 : LocTriggerData.tg_trigslot = slot;
2843 60 : LocTriggerData.tg_trigtuple = trigtuple;
2844 60 : LocTriggerData.tg_trigger = trigger;
2845 60 : rettuple = ExecCallTriggerFunc(&LocTriggerData,
2846 : i,
2847 : relinfo->ri_TrigFunctions,
2848 : relinfo->ri_TrigInstrument,
2849 60 : GetPerTupleMemoryContext(estate));
2850 60 : if (rettuple == NULL)
2851 6 : return false; /* Delete was suppressed */
2852 54 : if (rettuple != trigtuple)
2853 0 : heap_freetuple(rettuple);
2854 : }
2855 54 : return true;
2856 : }
2857 :
2858 : void
2859 14490 : ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
2860 : {
2861 : TriggerDesc *trigdesc;
2862 : int i;
2863 14490 : TriggerData LocTriggerData = {0};
2864 : Bitmapset *updatedCols;
2865 :
2866 14490 : trigdesc = relinfo->ri_TrigDesc;
2867 :
2868 14490 : if (trigdesc == NULL)
2869 14312 : return;
2870 4118 : if (!trigdesc->trig_update_before_statement)
2871 3940 : return;
2872 :
2873 : /* no-op if we already fired BS triggers in this context */
2874 178 : if (before_stmt_triggers_fired(RelationGetRelid(relinfo->ri_RelationDesc),
2875 : CMD_UPDATE))
2876 0 : return;
2877 :
2878 : /* statement-level triggers operate on the parent table */
2879 : Assert(relinfo->ri_RootResultRelInfo == NULL);
2880 :
2881 178 : updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
2882 :
2883 178 : LocTriggerData.type = T_TriggerData;
2884 178 : LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
2885 : TRIGGER_EVENT_BEFORE;
2886 178 : LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
2887 178 : LocTriggerData.tg_updatedcols = updatedCols;
2888 1600 : for (i = 0; i < trigdesc->numtriggers; i++)
2889 : {
2890 1422 : Trigger *trigger = &trigdesc->triggers[i];
2891 : HeapTuple newtuple;
2892 :
2893 1422 : if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
2894 : TRIGGER_TYPE_STATEMENT,
2895 : TRIGGER_TYPE_BEFORE,
2896 : TRIGGER_TYPE_UPDATE))
2897 1244 : continue;
2898 178 : if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
2899 : updatedCols, NULL, NULL))
2900 6 : continue;
2901 :
2902 172 : LocTriggerData.tg_trigger = trigger;
2903 172 : newtuple = ExecCallTriggerFunc(&LocTriggerData,
2904 : i,
2905 : relinfo->ri_TrigFunctions,
2906 : relinfo->ri_TrigInstrument,
2907 172 : GetPerTupleMemoryContext(estate));
2908 :
2909 172 : if (newtuple)
2910 0 : ereport(ERROR,
2911 : (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
2912 : errmsg("BEFORE STATEMENT trigger cannot return a value")));
2913 : }
2914 : }
2915 :
2916 : void
2917 13604 : ExecASUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
2918 : TransitionCaptureState *transition_capture)
2919 : {
2920 13604 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
2921 :
2922 : /* statement-level triggers operate on the parent table */
2923 : Assert(relinfo->ri_RootResultRelInfo == NULL);
2924 :
2925 13604 : if (trigdesc && trigdesc->trig_update_after_statement)
2926 396 : AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
2927 : TRIGGER_EVENT_UPDATE,
2928 : false, NULL, NULL, NIL,
2929 : ExecGetAllUpdatedCols(relinfo, estate),
2930 : transition_capture,
2931 : false);
2932 13604 : }
2933 :
/*
 * Execute BEFORE ROW UPDATE triggers.
 *
 * Returns true if the caller may proceed with the update (newslot then
 * holds the possibly-trigger-modified new tuple); returns false if a
 * trigger returned NULL ("do nothing") or the target row could not be
 * fetched/locked.  Either tupleid or fdw_trigtuple must be supplied,
 * but not both.
 */
bool
ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
					 ResultRelInfo *relinfo,
					 ItemPointer tupleid,
					 HeapTuple fdw_trigtuple,
					 TupleTableSlot *newslot,
					 TM_Result *tmresult,
					 TM_FailureData *tmfd)
{
	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
	TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
	HeapTuple	newtuple = NULL;
	HeapTuple	trigtuple;
	bool		should_free_trig = false;
	bool		should_free_new = false;
	TriggerData LocTriggerData = {0};
	int			i;
	Bitmapset  *updatedCols;
	LockTupleMode lockmode;

	/* Determine lock mode to use */
	lockmode = ExecUpdateLockMode(estate, relinfo);

	/* Exactly one of the two tuple identity sources must be supplied */
	Assert(HeapTupleIsValid(fdw_trigtuple) ^ ItemPointerIsValid(tupleid));
	if (fdw_trigtuple == NULL)
	{
		TupleTableSlot *epqslot_candidate = NULL;

		/* get a copy of the on-disk tuple we are planning to update */
		if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
								lockmode, oldslot, &epqslot_candidate,
								tmresult, tmfd))
			return false;		/* cancel the update action */

		/*
		 * In READ COMMITTED isolation level it's possible that target tuple
		 * was changed due to concurrent update. In that case we have a raw
		 * subplan output tuple in epqslot_candidate, and need to form a new
		 * insertable tuple using ExecGetUpdateNewTuple to replace the one we
		 * received in newslot. Neither we nor our callers have any further
		 * interest in the passed-in tuple, so it's okay to overwrite newslot
		 * with the newer data.
		 */
		if (epqslot_candidate != NULL)
		{
			TupleTableSlot *epqslot_clean;

			epqslot_clean = ExecGetUpdateNewTuple(relinfo, epqslot_candidate,
												  oldslot);

			/*
			 * Typically, the caller's newslot was also generated by
			 * ExecGetUpdateNewTuple, so that epqslot_clean will be the same
			 * slot and copying is not needed. But do the right thing if it
			 * isn't.
			 */
			if (unlikely(newslot != epqslot_clean))
				ExecCopySlot(newslot, epqslot_clean);

			/*
			 * At this point newslot contains a virtual tuple that may
			 * reference some fields of oldslot's tuple in some disk buffer.
			 * If that tuple is in a different page than the original target
			 * tuple, then our only pin on that buffer is oldslot's, and we're
			 * about to release it. Hence we'd better materialize newslot to
			 * ensure it doesn't contain references into an unpinned buffer.
			 * (We'd materialize it below anyway, but too late for safety.)
			 */
			ExecMaterializeSlot(newslot);
		}

		/*
		 * Here we convert oldslot to a materialized slot holding trigtuple.
		 * Neither slot passed to the triggers will hold any buffer pin.
		 */
		trigtuple = ExecFetchSlotHeapTuple(oldslot, true, &should_free_trig);
	}
	else
	{
		/* Put the FDW-supplied tuple into oldslot to unify the cases */
		ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
		trigtuple = fdw_trigtuple;
	}

	LocTriggerData.type = T_TriggerData;
	LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
		TRIGGER_EVENT_ROW |
		TRIGGER_EVENT_BEFORE;
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
	updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
	LocTriggerData.tg_updatedcols = updatedCols;
	for (i = 0; i < trigdesc->numtriggers; i++)
	{
		Trigger    *trigger = &trigdesc->triggers[i];
		HeapTuple	oldtuple;

		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
								  TRIGGER_TYPE_ROW,
								  TRIGGER_TYPE_BEFORE,
								  TRIGGER_TYPE_UPDATE))
			continue;
		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
							updatedCols, oldslot, newslot))
			continue;

		/* Lazily fetch newslot's tuple on the first qualifying trigger */
		if (!newtuple)
			newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free_new);

		LocTriggerData.tg_trigslot = oldslot;
		LocTriggerData.tg_trigtuple = trigtuple;
		LocTriggerData.tg_newtuple = oldtuple = newtuple;
		LocTriggerData.tg_newslot = newslot;
		LocTriggerData.tg_trigger = trigger;
		newtuple = ExecCallTriggerFunc(&LocTriggerData,
									   i,
									   relinfo->ri_TrigFunctions,
									   relinfo->ri_TrigInstrument,
									   GetPerTupleMemoryContext(estate));

		if (newtuple == NULL)
		{
			/* trigger suppressed the update; free locally-owned tuples */
			if (should_free_trig)
				heap_freetuple(trigtuple);
			if (should_free_new)
				heap_freetuple(oldtuple);
			return false;		/* "do nothing" */
		}
		else if (newtuple != oldtuple)
		{
			ExecForceStoreHeapTuple(newtuple, newslot, false);

			/*
			 * If the tuple returned by the trigger / being stored, is the old
			 * row version, and the heap tuple passed to the trigger was
			 * allocated locally, materialize the slot. Otherwise we might
			 * free it while still referenced by the slot.
			 */
			if (should_free_trig && newtuple == trigtuple)
				ExecMaterializeSlot(newslot);

			if (should_free_new)
				heap_freetuple(oldtuple);

			/* signal tuple should be re-fetched if used */
			newtuple = NULL;
		}
	}
	if (should_free_trig)
		heap_freetuple(trigtuple);

	return true;
}
3086 :
3087 : /*
3088 : * Note: 'src_partinfo' and 'dst_partinfo', when non-NULL, refer to the source
3089 : * and destination partitions, respectively, of a cross-partition update of
3090 : * the root partitioned table mentioned in the query, given by 'relinfo'.
3091 : * 'tupleid' in that case refers to the ctid of the "old" tuple in the source
3092 : * partition, and 'newslot' contains the "new" tuple in the destination
3093 : * partition. This interface allows to support the requirements of
3094 : * ExecCrossPartitionUpdateForeignKey(); is_crosspart_update must be true in
3095 : * that case.
3096 : */
3097 : void
3098 378614 : ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
3099 : ResultRelInfo *src_partinfo,
3100 : ResultRelInfo *dst_partinfo,
3101 : ItemPointer tupleid,
3102 : HeapTuple fdw_trigtuple,
3103 : TupleTableSlot *newslot,
3104 : List *recheckIndexes,
3105 : TransitionCaptureState *transition_capture,
3106 : bool is_crosspart_update)
3107 : {
3108 378614 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3109 :
3110 378614 : if ((trigdesc && trigdesc->trig_update_after_row) ||
3111 360 : (transition_capture &&
3112 360 : (transition_capture->tcs_update_old_table ||
3113 12 : transition_capture->tcs_update_new_table)))
3114 : {
3115 : /*
3116 : * Note: if the UPDATE is converted into a DELETE+INSERT as part of
3117 : * update-partition-key operation, then this function is also called
3118 : * separately for DELETE and INSERT to capture transition table rows.
3119 : * In such case, either old tuple or new tuple can be NULL.
3120 : */
3121 : TupleTableSlot *oldslot;
3122 : ResultRelInfo *tupsrc;
3123 :
3124 : Assert((src_partinfo != NULL && dst_partinfo != NULL) ||
3125 : !is_crosspart_update);
3126 :
3127 3690 : tupsrc = src_partinfo ? src_partinfo : relinfo;
3128 3690 : oldslot = ExecGetTriggerOldSlot(estate, tupsrc);
3129 :
3130 3690 : if (fdw_trigtuple == NULL && ItemPointerIsValid(tupleid))
3131 3628 : GetTupleForTrigger(estate,
3132 : NULL,
3133 : tupsrc,
3134 : tupleid,
3135 : LockTupleExclusive,
3136 : oldslot,
3137 : NULL,
3138 : NULL,
3139 : NULL);
3140 62 : else if (fdw_trigtuple != NULL)
3141 20 : ExecForceStoreHeapTuple(fdw_trigtuple, oldslot, false);
3142 : else
3143 42 : ExecClearTuple(oldslot);
3144 :
3145 3690 : AfterTriggerSaveEvent(estate, relinfo,
3146 : src_partinfo, dst_partinfo,
3147 : TRIGGER_EVENT_UPDATE,
3148 : true,
3149 : oldslot, newslot, recheckIndexes,
3150 : ExecGetAllUpdatedCols(relinfo, estate),
3151 : transition_capture,
3152 : is_crosspart_update);
3153 : }
3154 378614 : }
3155 :
/*
 * Execute INSTEAD OF ROW UPDATE triggers.
 *
 * Returns true if the caller may treat the update as done (newslot may have
 * been replaced by trigger-modified data); false if a trigger returned NULL,
 * meaning "do nothing".
 */
bool
ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
					 HeapTuple trigtuple, TupleTableSlot *newslot)
{
	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
	TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
	HeapTuple	newtuple = NULL;
	bool		should_free;	/* set when newtuple is first fetched below */
	TriggerData LocTriggerData = {0};
	int			i;

	LocTriggerData.type = T_TriggerData;
	LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
		TRIGGER_EVENT_ROW |
		TRIGGER_EVENT_INSTEAD;
	LocTriggerData.tg_relation = relinfo->ri_RelationDesc;

	/* Expose the old row to the trigger functions via the old slot */
	ExecForceStoreHeapTuple(trigtuple, oldslot, false);

	for (i = 0; i < trigdesc->numtriggers; i++)
	{
		Trigger    *trigger = &trigdesc->triggers[i];
		HeapTuple	oldtuple;

		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
								  TRIGGER_TYPE_ROW,
								  TRIGGER_TYPE_INSTEAD,
								  TRIGGER_TYPE_UPDATE))
			continue;
		if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
							NULL, oldslot, newslot))
			continue;

		/* Lazily fetch newslot's tuple on the first qualifying trigger */
		if (!newtuple)
			newtuple = ExecFetchSlotHeapTuple(newslot, true, &should_free);

		LocTriggerData.tg_trigslot = oldslot;
		LocTriggerData.tg_trigtuple = trigtuple;
		LocTriggerData.tg_newslot = newslot;
		LocTriggerData.tg_newtuple = oldtuple = newtuple;

		LocTriggerData.tg_trigger = trigger;
		newtuple = ExecCallTriggerFunc(&LocTriggerData,
									   i,
									   relinfo->ri_TrigFunctions,
									   relinfo->ri_TrigInstrument,
									   GetPerTupleMemoryContext(estate));
		if (newtuple == NULL)
		{
			return false;		/* "do nothing" */
		}
		else if (newtuple != oldtuple)
		{
			/* Trigger replaced the new tuple; store it back in newslot */
			ExecForceStoreHeapTuple(newtuple, newslot, false);

			if (should_free)
				heap_freetuple(oldtuple);

			/* signal tuple should be re-fetched if used */
			newtuple = NULL;
		}
	}

	return true;
}
3221 :
3222 : void
3223 3684 : ExecBSTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
3224 : {
3225 : TriggerDesc *trigdesc;
3226 : int i;
3227 3684 : TriggerData LocTriggerData = {0};
3228 :
3229 3684 : trigdesc = relinfo->ri_TrigDesc;
3230 :
3231 3684 : if (trigdesc == NULL)
3232 3672 : return;
3233 902 : if (!trigdesc->trig_truncate_before_statement)
3234 890 : return;
3235 :
3236 12 : LocTriggerData.type = T_TriggerData;
3237 12 : LocTriggerData.tg_event = TRIGGER_EVENT_TRUNCATE |
3238 : TRIGGER_EVENT_BEFORE;
3239 12 : LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
3240 :
3241 36 : for (i = 0; i < trigdesc->numtriggers; i++)
3242 : {
3243 24 : Trigger *trigger = &trigdesc->triggers[i];
3244 : HeapTuple newtuple;
3245 :
3246 24 : if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
3247 : TRIGGER_TYPE_STATEMENT,
3248 : TRIGGER_TYPE_BEFORE,
3249 : TRIGGER_TYPE_TRUNCATE))
3250 12 : continue;
3251 12 : if (!TriggerEnabled(estate, relinfo, trigger, LocTriggerData.tg_event,
3252 : NULL, NULL, NULL))
3253 0 : continue;
3254 :
3255 12 : LocTriggerData.tg_trigger = trigger;
3256 12 : newtuple = ExecCallTriggerFunc(&LocTriggerData,
3257 : i,
3258 : relinfo->ri_TrigFunctions,
3259 : relinfo->ri_TrigInstrument,
3260 12 : GetPerTupleMemoryContext(estate));
3261 :
3262 12 : if (newtuple)
3263 0 : ereport(ERROR,
3264 : (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
3265 : errmsg("BEFORE STATEMENT trigger cannot return a value")));
3266 : }
3267 : }
3268 :
3269 : void
3270 3676 : ExecASTruncateTriggers(EState *estate, ResultRelInfo *relinfo)
3271 : {
3272 3676 : TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
3273 :
3274 3676 : if (trigdesc && trigdesc->trig_truncate_after_statement)
3275 8 : AfterTriggerSaveEvent(estate, relinfo,
3276 : NULL, NULL,
3277 : TRIGGER_EVENT_TRUNCATE,
3278 : false, NULL, NULL, NIL, NULL, NULL,
3279 : false);
3280 3676 : }
3281 :
3282 :
3283 : /*
3284 : * Fetch tuple into "oldslot", dealing with locking and EPQ if necessary
3285 : */
3286 : static bool
3287 12736 : GetTupleForTrigger(EState *estate,
3288 : EPQState *epqstate,
3289 : ResultRelInfo *relinfo,
3290 : ItemPointer tid,
3291 : LockTupleMode lockmode,
3292 : TupleTableSlot *oldslot,
3293 : TupleTableSlot **epqslot,
3294 : TM_Result *tmresultp,
3295 : TM_FailureData *tmfdp)
3296 : {
3297 12736 : Relation relation = relinfo->ri_RelationDesc;
3298 :
3299 12736 : if (epqslot != NULL)
3300 : {
3301 : TM_Result test;
3302 : TM_FailureData tmfd;
3303 2912 : int lockflags = 0;
3304 :
3305 2912 : *epqslot = NULL;
3306 :
3307 : /* caller must pass an epqstate if EvalPlanQual is possible */
3308 : Assert(epqstate != NULL);
3309 :
3310 : /*
3311 : * lock tuple for update
3312 : */
3313 2912 : if (!IsolationUsesXactSnapshot())
3314 2048 : lockflags |= TUPLE_LOCK_FLAG_FIND_LAST_VERSION;
3315 2912 : test = table_tuple_lock(relation, tid, estate->es_snapshot, oldslot,
3316 : estate->es_output_cid,
3317 : lockmode, LockWaitBlock,
3318 : lockflags,
3319 : &tmfd);
3320 :
3321 : /* Let the caller know about the status of this operation */
3322 2908 : if (tmresultp)
3323 216 : *tmresultp = test;
3324 2908 : if (tmfdp)
3325 2902 : *tmfdp = tmfd;
3326 :
3327 2908 : switch (test)
3328 : {
3329 6 : case TM_SelfModified:
3330 :
3331 : /*
3332 : * The target tuple was already updated or deleted by the
3333 : * current command, or by a later command in the current
3334 : * transaction. We ignore the tuple in the former case, and
3335 : * throw error in the latter case, for the same reasons
3336 : * enumerated in ExecUpdate and ExecDelete in
3337 : * nodeModifyTable.c.
3338 : */
3339 6 : if (tmfd.cmax != estate->es_output_cid)
3340 6 : ereport(ERROR,
3341 : (errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
3342 : errmsg("tuple to be updated was already modified by an operation triggered by the current command"),
3343 : errhint("Consider using an AFTER trigger instead of a BEFORE trigger to propagate changes to other rows.")));
3344 :
3345 : /* treat it as deleted; do not process */
3346 32 : return false;
3347 :
3348 2884 : case TM_Ok:
3349 2884 : if (tmfd.traversed)
3350 : {
3351 : /*
3352 : * Recheck the tuple using EPQ. For MERGE, we leave this
3353 : * to the caller (it must do additional rechecking, and
3354 : * might end up executing a different action entirely).
3355 : */
3356 26 : if (estate->es_plannedstmt->commandType == CMD_MERGE)
3357 : {
3358 14 : if (tmresultp)
3359 14 : *tmresultp = TM_Updated;
3360 14 : return false;
3361 : }
3362 :
3363 12 : *epqslot = EvalPlanQual(epqstate,
3364 : relation,
3365 : relinfo->ri_RangeTableIndex,
3366 : oldslot);
3367 :
3368 : /*
3369 : * If PlanQual failed for updated tuple - we must not
3370 : * process this tuple!
3371 : */
3372 12 : if (TupIsNull(*epqslot))
3373 : {
3374 4 : *epqslot = NULL;
3375 4 : return false;
3376 : }
3377 : }
3378 2866 : break;
3379 :
3380 2 : case TM_Updated:
3381 2 : if (IsolationUsesXactSnapshot())
3382 2 : ereport(ERROR,
3383 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3384 : errmsg("could not serialize access due to concurrent update")));
3385 0 : elog(ERROR, "unexpected table_tuple_lock status: %u", test);
3386 : break;
3387 :
3388 16 : case TM_Deleted:
3389 16 : if (IsolationUsesXactSnapshot())
3390 2 : ereport(ERROR,
3391 : (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
3392 : errmsg("could not serialize access due to concurrent delete")));
3393 : /* tuple was deleted */
3394 14 : return false;
3395 :
3396 0 : case TM_Invisible:
3397 0 : elog(ERROR, "attempted to lock invisible tuple");
3398 : break;
3399 :
3400 0 : default:
3401 0 : elog(ERROR, "unrecognized table_tuple_lock status: %u", test);
3402 : return false; /* keep compiler quiet */
3403 : }
3404 : }
3405 : else
3406 : {
3407 : /*
3408 : * We expect the tuple to be present, thus very simple error handling
3409 : * suffices.
3410 : */
3411 9824 : if (!table_tuple_fetch_row_version(relation, tid, SnapshotAny,
3412 : oldslot))
3413 0 : elog(ERROR, "failed to fetch tuple for trigger");
3414 : }
3415 :
3416 12690 : return true;
3417 : }
3418 :
3419 : /*
3420 : * Is trigger enabled to fire?
3421 : */
3422 : static bool
3423 24420 : TriggerEnabled(EState *estate, ResultRelInfo *relinfo,
3424 : Trigger *trigger, TriggerEvent event,
3425 : Bitmapset *modifiedCols,
3426 : TupleTableSlot *oldslot, TupleTableSlot *newslot)
3427 : {
3428 : /* Check replication-role-dependent enable state */
3429 24420 : if (SessionReplicationRole == SESSION_REPLICATION_ROLE_REPLICA)
3430 : {
3431 126 : if (trigger->tgenabled == TRIGGER_FIRES_ON_ORIGIN ||
3432 78 : trigger->tgenabled == TRIGGER_DISABLED)
3433 84 : return false;
3434 : }
3435 : else /* ORIGIN or LOCAL role */
3436 : {
3437 24294 : if (trigger->tgenabled == TRIGGER_FIRES_ON_REPLICA ||
3438 24292 : trigger->tgenabled == TRIGGER_DISABLED)
3439 158 : return false;
3440 : }
3441 :
3442 : /*
3443 : * Check for column-specific trigger (only possible for UPDATE, and in
3444 : * fact we *must* ignore tgattr for other event types)
3445 : */
3446 24178 : if (trigger->tgnattr > 0 && TRIGGER_FIRED_BY_UPDATE(event))
3447 : {
3448 : int i;
3449 : bool modified;
3450 :
3451 424 : modified = false;
3452 556 : for (i = 0; i < trigger->tgnattr; i++)
3453 : {
3454 472 : if (bms_is_member(trigger->tgattr[i] - FirstLowInvalidHeapAttributeNumber,
3455 : modifiedCols))
3456 : {
3457 340 : modified = true;
3458 340 : break;
3459 : }
3460 : }
3461 424 : if (!modified)
3462 84 : return false;
3463 : }
3464 :
3465 : /* Check for WHEN clause */
3466 24094 : if (trigger->tgqual)
3467 : {
3468 : ExprState **predicate;
3469 : ExprContext *econtext;
3470 : MemoryContext oldContext;
3471 : int i;
3472 :
3473 : Assert(estate != NULL);
3474 :
3475 : /*
3476 : * trigger is an element of relinfo->ri_TrigDesc->triggers[]; find the
3477 : * matching element of relinfo->ri_TrigWhenExprs[]
3478 : */
3479 450 : i = trigger - relinfo->ri_TrigDesc->triggers;
3480 450 : predicate = &relinfo->ri_TrigWhenExprs[i];
3481 :
3482 : /*
3483 : * If first time through for this WHEN expression, build expression
3484 : * nodetrees for it. Keep them in the per-query memory context so
3485 : * they'll survive throughout the query.
3486 : */
3487 450 : if (*predicate == NULL)
3488 : {
3489 : Node *tgqual;
3490 :
3491 242 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
3492 242 : tgqual = stringToNode(trigger->tgqual);
3493 : /* Change references to OLD and NEW to INNER_VAR and OUTER_VAR */
3494 242 : ChangeVarNodes(tgqual, PRS2_OLD_VARNO, INNER_VAR, 0);
3495 242 : ChangeVarNodes(tgqual, PRS2_NEW_VARNO, OUTER_VAR, 0);
3496 : /* ExecPrepareQual wants implicit-AND form */
3497 242 : tgqual = (Node *) make_ands_implicit((Expr *) tgqual);
3498 242 : *predicate = ExecPrepareQual((List *) tgqual, estate);
3499 242 : MemoryContextSwitchTo(oldContext);
3500 : }
3501 :
3502 : /*
3503 : * We will use the EState's per-tuple context for evaluating WHEN
3504 : * expressions (creating it if it's not already there).
3505 : */
3506 450 : econtext = GetPerTupleExprContext(estate);
3507 :
3508 : /*
3509 : * Finally evaluate the expression, making the old and/or new tuples
3510 : * available as INNER_VAR/OUTER_VAR respectively.
3511 : */
3512 450 : econtext->ecxt_innertuple = oldslot;
3513 450 : econtext->ecxt_outertuple = newslot;
3514 450 : if (!ExecQual(*predicate, econtext))
3515 240 : return false;
3516 : }
3517 :
3518 23854 : return true;
3519 : }
3520 :
3521 :
3522 : /* ----------
3523 : * After-trigger stuff
3524 : *
3525 : * The AfterTriggersData struct holds data about pending AFTER trigger events
3526 : * during the current transaction tree. (BEFORE triggers are fired
3527 : * immediately so we don't need any persistent state about them.) The struct
3528 : * and most of its subsidiary data are kept in TopTransactionContext; however
3529 : * some data that can be discarded sooner appears in the CurTransactionContext
3530 : * of the relevant subtransaction. Also, the individual event records are
3531 : * kept in a separate sub-context of TopTransactionContext. This is done
3532 : * mainly so that it's easy to tell from a memory context dump how much space
3533 : * is being eaten by trigger events.
3534 : *
3535 : * Because the list of pending events can grow large, we go to some
3536 : * considerable effort to minimize per-event memory consumption. The event
3537 : * records are grouped into chunks and common data for similar events in the
3538 : * same chunk is only stored once.
3539 : *
3540 : * XXX We need to be able to save the per-event data in a file if it grows too
3541 : * large.
3542 : * ----------
3543 : */
3544 :
/* Per-trigger SET CONSTRAINT status */
typedef struct SetConstraintTriggerData
{
	Oid			sct_tgoid;		/* OID of the constraint trigger */
	bool		sct_tgisdeferred;	/* is it currently set DEFERRED? */
} SetConstraintTriggerData;

typedef struct SetConstraintTriggerData *SetConstraintTrigger;
3553 :
3554 : /*
3555 : * SET CONSTRAINT intra-transaction status.
3556 : *
3557 : * We make this a single palloc'd object so it can be copied and freed easily.
3558 : *
3559 : * all_isset and all_isdeferred are used to keep track
3560 : * of SET CONSTRAINTS ALL {DEFERRED, IMMEDIATE}.
3561 : *
3562 : * trigstates[] stores per-trigger tgisdeferred settings.
3563 : */
3564 : typedef struct SetConstraintStateData
3565 : {
3566 : bool all_isset;
3567 : bool all_isdeferred;
3568 : int numstates; /* number of trigstates[] entries in use */
3569 : int numalloc; /* allocated size of trigstates[] */
3570 : SetConstraintTriggerData trigstates[FLEXIBLE_ARRAY_MEMBER];
3571 : } SetConstraintStateData;
3572 :
3573 : typedef SetConstraintStateData *SetConstraintState;
3574 :
3575 :
/*
 * Per-trigger-event data
 *
 * The actual per-event data, AfterTriggerEventData, includes DONE/IN_PROGRESS
 * status bits, up to two tuple CTIDs, and optionally two OIDs of partitions.
 * Each event record also has an associated AfterTriggerSharedData that is
 * shared across all instances of similar events within a "chunk".
 *
 * For row-level triggers, we arrange not to waste storage on unneeded ctid
 * fields.  Updates of regular tables use two; inserts and deletes of regular
 * tables use one; foreign tables always use zero and save the tuple(s) to a
 * tuplestore.  AFTER_TRIGGER_FDW_FETCH directs AfterTriggerExecute() to
 * retrieve a fresh tuple or pair of tuples from that tuplestore, while
 * AFTER_TRIGGER_FDW_REUSE directs it to use the most-recently-retrieved
 * tuple(s).  This permits storing tuples once regardless of the number of
 * row-level triggers on a foreign table.
 *
 * When updates on partitioned tables cause rows to move between partitions,
 * the OIDs of both partitions are stored too, so that the tuples can be
 * fetched; such entries are marked AFTER_TRIGGER_CP_UPDATE (for "cross-
 * partition update").
 *
 * Note that we need triggers on foreign tables to be fired in exactly the
 * order they were queued, so that the tuples come out of the tuplestore in
 * the right order.  To ensure that, we forbid deferrable (constraint)
 * triggers on foreign tables.  This also ensures that such triggers do not
 * get deferred into outer trigger query levels, meaning that it's okay to
 * destroy the tuplestore at the end of the query level.
 *
 * Statement-level triggers always bear AFTER_TRIGGER_1CTID, though they
 * require no ctid field.  We lack the flag bit space to neatly represent that
 * distinct case, and it seems unlikely to be worth much trouble.
 *
 * Note: ats_firing_id is initially zero and is set to something else when
 * AFTER_TRIGGER_IN_PROGRESS is set.  It indicates which trigger firing
 * cycle the trigger will be fired in (or was fired in, if DONE is set).
 * Although this is mutable state, we can keep it in AfterTriggerSharedData
 * because all instances of the same type of event in a given event list will
 * be fired at the same time, if they were queued between the same firing
 * cycles.  So we need only ensure that ats_firing_id is zero when attaching
 * a new event to an existing AfterTriggerSharedData record.
 */
typedef uint32 TriggerFlags;

#define AFTER_TRIGGER_OFFSET			0x07FFFFFF	/* must be low-order bits */
#define AFTER_TRIGGER_DONE				0x80000000	/* event was fired (see ats_firing_id note) */
#define AFTER_TRIGGER_IN_PROGRESS		0x40000000	/* event's firing cycle is underway */
/* bits describing the size and tuple sources of this event */
#define AFTER_TRIGGER_FDW_REUSE			0x00000000
#define AFTER_TRIGGER_FDW_FETCH			0x20000000
#define AFTER_TRIGGER_1CTID				0x10000000
#define AFTER_TRIGGER_2CTID				0x30000000
#define AFTER_TRIGGER_CP_UPDATE			0x08000000
#define AFTER_TRIGGER_TUP_BITS			0x38000000	/* mask covering the FDW/1CTID/2CTID bits */
typedef struct AfterTriggerSharedData *AfterTriggerShared;

typedef struct AfterTriggerSharedData
{
	TriggerEvent ats_event;		/* event type indicator, see trigger.h */
	Oid			ats_tgoid;		/* the trigger's ID */
	Oid			ats_relid;		/* the relation it's on */
	CommandId	ats_firing_id;	/* ID for firing cycle */
	struct AfterTriggersTableData *ats_table;	/* transition table access */
	Bitmapset  *ats_modifiedcols;	/* modified columns */
} AfterTriggerSharedData;

typedef struct AfterTriggerEventData *AfterTriggerEvent;

typedef struct AfterTriggerEventData
{
	TriggerFlags ate_flags;		/* status bits and offset to shared data */
	ItemPointerData ate_ctid1;	/* inserted, deleted, or old updated tuple */
	ItemPointerData ate_ctid2;	/* new updated tuple */

	/*
	 * During a cross-partition update of a partitioned table, we also store
	 * the OIDs of source and destination partitions that are needed to fetch
	 * the old (ctid1) and the new tuple (ctid2) from, respectively.
	 */
	Oid			ate_src_part;
	Oid			ate_dst_part;
} AfterTriggerEventData;

/* AfterTriggerEventData, minus ate_src_part, ate_dst_part */
typedef struct AfterTriggerEventDataNoOids
{
	TriggerFlags ate_flags;
	ItemPointerData ate_ctid1;
	ItemPointerData ate_ctid2;
} AfterTriggerEventDataNoOids;

/* AfterTriggerEventData, minus ate_*_part and ate_ctid2 */
typedef struct AfterTriggerEventDataOneCtid
{
	TriggerFlags ate_flags;		/* status bits and offset to shared data */
	ItemPointerData ate_ctid1;	/* inserted, deleted, or old updated tuple */
} AfterTriggerEventDataOneCtid;

/* AfterTriggerEventData, minus ate_*_part, ate_ctid1 and ate_ctid2 */
typedef struct AfterTriggerEventDataZeroCtids
{
	TriggerFlags ate_flags;		/* status bits and offset to shared data */
} AfterTriggerEventDataZeroCtids;

/* Actual size of an event record, as determined by its TUP_BITS flags */
#define SizeofTriggerEvent(evt) \
	(((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_CP_UPDATE ? \
	 sizeof(AfterTriggerEventData) : \
	 (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \
	  sizeof(AfterTriggerEventDataNoOids) : \
	  (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_1CTID ? \
	   sizeof(AfterTriggerEventDataOneCtid) : \
	   sizeof(AfterTriggerEventDataZeroCtids))))

/* The shared data lives at the byte offset stored in the event's flag bits */
#define GetTriggerSharedData(evt) \
	((AfterTriggerShared) ((char *) (evt) + ((evt)->ate_flags & AFTER_TRIGGER_OFFSET)))
3691 :
/*
 * To avoid palloc overhead, we keep trigger events in arrays in successively-
 * larger chunks (a slightly more sophisticated version of an expansible
 * array).  The space between CHUNK_DATA_START and freeptr is occupied by
 * AfterTriggerEventData records; the space between endfree and endptr is
 * occupied by AfterTriggerSharedData records.
 */
typedef struct AfterTriggerEventChunk
{
	struct AfterTriggerEventChunk *next;	/* list link */
	char	   *freeptr;		/* start of free space in chunk */
	char	   *endfree;		/* end of free space in chunk */
	char	   *endptr;			/* end of chunk */
	/* event data follows here */
} AfterTriggerEventChunk;

/* Address of the first possible event record within a chunk */
#define CHUNK_DATA_START(cptr) ((char *) (cptr) + MAXALIGN(sizeof(AfterTriggerEventChunk)))

/* A list of events */
typedef struct AfterTriggerEventList
{
	AfterTriggerEventChunk *head;
	AfterTriggerEventChunk *tail;
	char	   *tailfree;		/* freeptr of tail chunk */
} AfterTriggerEventList;

/* Macros to help in iterating over a list of events */
#define for_each_chunk(cptr, evtlist) \
	for (cptr = (evtlist).head; cptr != NULL; cptr = cptr->next)
#define for_each_event(eptr, cptr) \
	for (eptr = (AfterTriggerEvent) CHUNK_DATA_START(cptr); \
		 (char *) eptr < (cptr)->freeptr; \
		 eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
/* Use this if no special per-chunk processing is needed */
#define for_each_event_chunk(eptr, cptr, evtlist) \
	for_each_chunk(cptr, evtlist) for_each_event(eptr, cptr)

/* Macros for iterating from a start point that might not be list start */
#define for_each_chunk_from(cptr) \
	for (; cptr != NULL; cptr = cptr->next)
#define for_each_event_from(eptr, cptr) \
	for (; \
		 (char *) eptr < (cptr)->freeptr; \
		 eptr = (AfterTriggerEvent) (((char *) eptr) + SizeofTriggerEvent(eptr)))
3736 :
3737 :
/*
 * All per-transaction data for the AFTER TRIGGERS module.
 *
 * AfterTriggersData has the following fields:
 *
 * firing_counter is incremented for each call of afterTriggerInvokeEvents.
 * We mark firable events with the current firing cycle's ID so that we can
 * tell which ones to work on.  This ensures sane behavior if a trigger
 * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will
 * only fire those events that weren't already scheduled for firing.
 *
 * state keeps track of the transaction-local effects of SET CONSTRAINTS.
 * This is saved and restored across failed subtransactions.
 *
 * events is the current list of deferred events.  This is global across
 * all subtransactions of the current transaction.  In a subtransaction
 * abort, we know that the events added by the subtransaction are at the
 * end of the list, so it is relatively easy to discard them.  The event
 * list chunks themselves are stored in event_cxt.
 *
 * query_depth is the current depth of nested AfterTriggerBeginQuery calls
 * (-1 when the stack is empty).
 *
 * query_stack[query_depth] is the per-query-level data, including these fields:
 *
 * events is a list of AFTER trigger events queued by the current query.
 * None of these are valid until the matching AfterTriggerEndQuery call
 * occurs.  At that point we fire immediate-mode triggers, and append any
 * deferred events to the main events list.
 *
 * fdw_tuplestore is a tuplestore containing the foreign-table tuples
 * needed by events queued by the current query.  (Note: we use just one
 * tuplestore even though more than one foreign table might be involved.
 * This is okay because tuplestores don't really care what's in the tuples
 * they store; but it's possible that someday it'd break.)
 *
 * tables is a List of AfterTriggersTableData structs for target tables
 * of the current query (see below).
 *
 * maxquerydepth is just the allocated length of query_stack.
 *
 * trans_stack holds per-subtransaction data, including these fields:
 *
 * state is NULL or a pointer to a saved copy of the SET CONSTRAINTS
 * state data.  Each subtransaction level that modifies that state first
 * saves a copy, which we use to restore the state if we abort.
 *
 * events is a copy of the events head/tail pointers,
 * which we use to restore those values during subtransaction abort.
 *
 * query_depth is the subtransaction-start-time value of query_depth,
 * which we similarly use to clean up at subtransaction abort.
 *
 * firing_counter is the subtransaction-start-time value of firing_counter.
 * We use this to recognize which deferred triggers were fired (or marked
 * for firing) within an aborted subtransaction.
 *
 * We use GetCurrentTransactionNestLevel() to determine the correct array
 * index in trans_stack.  maxtransdepth is the number of allocated entries in
 * trans_stack.  (By not keeping our own stack pointer, we can avoid trouble
 * in cases where errors during subxact abort cause multiple invocations
 * of AfterTriggerEndSubXact() at the same nesting depth.)
 *
 * We create an AfterTriggersTableData struct for each target table of the
 * current query, and each operation mode (INSERT/UPDATE/DELETE), that has
 * either transition tables or statement-level triggers.  This is used to
 * hold the relevant transition tables, as well as info tracking whether
 * we already queued the statement triggers.  (We use that info to prevent
 * firing the same statement triggers more than once per statement, or really
 * once per transition table set.)  These structs, along with the transition
 * table tuplestores, live in the (sub)transaction's CurTransactionContext.
 * That's sufficient lifespan because we don't allow transition tables to be
 * used by deferrable triggers, so they only need to survive until
 * AfterTriggerEndQuery.
 */
typedef struct AfterTriggersQueryData AfterTriggersQueryData;
typedef struct AfterTriggersTransData AfterTriggersTransData;
typedef struct AfterTriggersTableData AfterTriggersTableData;

typedef struct AfterTriggersData
{
	CommandId	firing_counter; /* next firing ID to assign */
	SetConstraintState state;	/* the active S C state */
	AfterTriggerEventList events;	/* deferred-event list */
	MemoryContext event_cxt;	/* memory context for events, if any */

	/* per-query-level data: */
	AfterTriggersQueryData *query_stack;	/* array of structs shown below */
	int			query_depth;	/* current index in above array */
	int			maxquerydepth;	/* allocated len of above array */

	/* per-subtransaction-level data: */
	AfterTriggersTransData *trans_stack;	/* array of structs shown below */
	int			maxtransdepth;	/* allocated len of above array */
} AfterTriggersData;

struct AfterTriggersQueryData
{
	AfterTriggerEventList events;	/* events pending from this query */
	Tuplestorestate *fdw_tuplestore;	/* foreign tuples for said events */
	List	   *tables;			/* list of AfterTriggersTableData, see below */
};

struct AfterTriggersTransData
{
	/* these fields are just for resetting at subtrans abort: */
	SetConstraintState state;	/* saved S C state, or NULL if not yet saved */
	AfterTriggerEventList events;	/* saved list pointer */
	int			query_depth;	/* saved query_depth */
	CommandId	firing_counter; /* saved firing_counter */
};

struct AfterTriggersTableData
{
	/* relid + cmdType form the lookup key for these structs: */
	Oid			relid;			/* target table's OID */
	CmdType		cmdType;		/* event type, CMD_INSERT/UPDATE/DELETE */
	bool		closed;			/* true when no longer OK to add tuples */
	bool		before_trig_done;	/* did we already queue BS triggers? */
	bool		after_trig_done;	/* did we already queue AS triggers? */
	AfterTriggerEventList after_trig_events;	/* if so, saved list pointer */

	/*
	 * We maintain separate transition tables for UPDATE/INSERT/DELETE since
	 * MERGE can run all three actions in a single statement.  Note that
	 * UPDATE needs both old and new transition tables whereas INSERT needs
	 * only new, and DELETE needs only old.
	 */

	/* "old" transition table for UPDATE, if any */
	Tuplestorestate *old_upd_tuplestore;
	/* "new" transition table for UPDATE, if any */
	Tuplestorestate *new_upd_tuplestore;
	/* "old" transition table for DELETE, if any */
	Tuplestorestate *old_del_tuplestore;
	/* "new" transition table for INSERT, if any */
	Tuplestorestate *new_ins_tuplestore;

	TupleTableSlot *storeslot;	/* for converting to tuplestore's format */
};
3878 :
/* The single instance of per-transaction AFTER-trigger state */
static AfterTriggersData afterTriggers;

/* Local function forward declarations */
static void AfterTriggerExecute(EState *estate,
								AfterTriggerEvent event,
								ResultRelInfo *relInfo,
								ResultRelInfo *src_relInfo,
								ResultRelInfo *dst_relInfo,
								TriggerDesc *trigdesc,
								FmgrInfo *finfo,
								Instrumentation *instr,
								MemoryContext per_tuple_context,
								TupleTableSlot *trig_tuple_slot1,
								TupleTableSlot *trig_tuple_slot2);
static AfterTriggersTableData *GetAfterTriggersTableData(Oid relid,
														 CmdType cmdType);
static TupleTableSlot *GetAfterTriggersStoreSlot(AfterTriggersTableData *table,
												 TupleDesc tupdesc);
static Tuplestorestate *GetAfterTriggersTransitionTable(int event,
														TupleTableSlot *oldslot,
														TupleTableSlot *newslot,
														TransitionCaptureState *transition_capture);
static void TransitionTableAddTuple(EState *estate,
									TransitionCaptureState *transition_capture,
									ResultRelInfo *relinfo,
									TupleTableSlot *slot,
									TupleTableSlot *original_insert_tuple,
									Tuplestorestate *tuplestore);
static void AfterTriggerFreeQuery(AfterTriggersQueryData *qs);
static SetConstraintState SetConstraintStateCreate(int numalloc);
static SetConstraintState SetConstraintStateCopy(SetConstraintState origstate);
static SetConstraintState SetConstraintStateAddItem(SetConstraintState state,
													Oid tgoid, bool tgisdeferred);
static void cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent);
3912 :
3913 :
3914 : /*
3915 : * Get the FDW tuplestore for the current trigger query level, creating it
3916 : * if necessary.
3917 : */
3918 : static Tuplestorestate *
3919 100 : GetCurrentFDWTuplestore(void)
3920 : {
3921 : Tuplestorestate *ret;
3922 :
3923 100 : ret = afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore;
3924 100 : if (ret == NULL)
3925 : {
3926 : MemoryContext oldcxt;
3927 : ResourceOwner saveResourceOwner;
3928 :
3929 : /*
3930 : * Make the tuplestore valid until end of subtransaction. We really
3931 : * only need it until AfterTriggerEndQuery().
3932 : */
3933 36 : oldcxt = MemoryContextSwitchTo(CurTransactionContext);
3934 36 : saveResourceOwner = CurrentResourceOwner;
3935 36 : CurrentResourceOwner = CurTransactionResourceOwner;
3936 :
3937 36 : ret = tuplestore_begin_heap(false, false, work_mem);
3938 :
3939 36 : CurrentResourceOwner = saveResourceOwner;
3940 36 : MemoryContextSwitchTo(oldcxt);
3941 :
3942 36 : afterTriggers.query_stack[afterTriggers.query_depth].fdw_tuplestore = ret;
3943 : }
3944 :
3945 100 : return ret;
3946 : }
3947 :
3948 : /* ----------
3949 : * afterTriggerCheckState()
3950 : *
3951 : * Returns true if the trigger event is actually in state DEFERRED.
3952 : * ----------
3953 : */
3954 : static bool
3955 11746 : afterTriggerCheckState(AfterTriggerShared evtshared)
3956 : {
3957 11746 : Oid tgoid = evtshared->ats_tgoid;
3958 11746 : SetConstraintState state = afterTriggers.state;
3959 : int i;
3960 :
3961 : /*
3962 : * For not-deferrable triggers (i.e. normal AFTER ROW triggers and
3963 : * constraints declared NOT DEFERRABLE), the state is always false.
3964 : */
3965 11746 : if ((evtshared->ats_event & AFTER_TRIGGER_DEFERRABLE) == 0)
3966 11042 : return false;
3967 :
3968 : /*
3969 : * If constraint state exists, SET CONSTRAINTS might have been executed
3970 : * either for this trigger or for all triggers.
3971 : */
3972 704 : if (state != NULL)
3973 : {
3974 : /* Check for SET CONSTRAINTS for this specific trigger. */
3975 312 : for (i = 0; i < state->numstates; i++)
3976 : {
3977 246 : if (state->trigstates[i].sct_tgoid == tgoid)
3978 60 : return state->trigstates[i].sct_tgisdeferred;
3979 : }
3980 :
3981 : /* Check for SET CONSTRAINTS ALL. */
3982 66 : if (state->all_isset)
3983 54 : return state->all_isdeferred;
3984 : }
3985 :
3986 : /*
3987 : * Otherwise return the default state for the trigger.
3988 : */
3989 590 : return ((evtshared->ats_event & AFTER_TRIGGER_INITDEFERRED) != 0);
3990 : }
3991 :
3992 : /* ----------
3993 : * afterTriggerCopyBitmap()
3994 : *
3995 : * Copy bitmap into AfterTriggerEvents memory context, which is where the after
3996 : * trigger events are kept.
3997 : * ----------
3998 : */
3999 : static Bitmapset *
4000 11802 : afterTriggerCopyBitmap(Bitmapset *src)
4001 : {
4002 : Bitmapset *dst;
4003 : MemoryContext oldcxt;
4004 :
4005 11802 : if (src == NULL)
4006 8106 : return NULL;
4007 :
4008 : /* Create event context if we didn't already */
4009 3696 : if (afterTriggers.event_cxt == NULL)
4010 1340 : afterTriggers.event_cxt =
4011 1340 : AllocSetContextCreate(TopTransactionContext,
4012 : "AfterTriggerEvents",
4013 : ALLOCSET_DEFAULT_SIZES);
4014 :
4015 3696 : oldcxt = MemoryContextSwitchTo(afterTriggers.event_cxt);
4016 :
4017 3696 : dst = bms_copy(src);
4018 :
4019 3696 : MemoryContextSwitchTo(oldcxt);
4020 :
4021 3696 : return dst;
4022 : }
4023 :
/* ----------
 * afterTriggerAddEvent()
 *
 *	Add a new trigger event to the specified queue.
 *	The passed-in event data is copied.
 * ----------
 */
static void
afterTriggerAddEvent(AfterTriggerEventList *events,
					 AfterTriggerEvent event, AfterTriggerShared evtshared)
{
	Size		eventsize = SizeofTriggerEvent(event);
	Size		needed = eventsize + sizeof(AfterTriggerSharedData);
	AfterTriggerEventChunk *chunk;
	AfterTriggerShared newshared;
	AfterTriggerEvent newevent;

	/*
	 * If empty list or not enough room in the tail chunk, make a new chunk.
	 * We assume here that a new shared record will always be needed.
	 */
	chunk = events->tail;
	if (chunk == NULL ||
		chunk->endfree - chunk->freeptr < needed)
	{
		Size		chunksize;

		/* Create event context if we didn't already */
		if (afterTriggers.event_cxt == NULL)
			afterTriggers.event_cxt =
				AllocSetContextCreate(TopTransactionContext,
									  "AfterTriggerEvents",
									  ALLOCSET_DEFAULT_SIZES);

		/*
		 * Chunk size starts at 1KB and is allowed to increase up to 1MB.
		 * These numbers are fairly arbitrary, though there is a hard limit at
		 * AFTER_TRIGGER_OFFSET; else we couldn't link event records to their
		 * shared records using the available space in ate_flags.  Another
		 * constraint is that if the chunk size gets too huge, the search loop
		 * below would get slow given a (not too common) usage pattern with
		 * many distinct event types in a chunk.  Therefore, we double the
		 * preceding chunk size only if there weren't too many shared records
		 * in the preceding chunk; otherwise we halve it.  This gives us some
		 * ability to adapt to the actual usage pattern of the current query
		 * while still having large chunk sizes in typical usage.  All chunk
		 * sizes used should be MAXALIGN multiples, to ensure that the shared
		 * records will be aligned safely.
		 */
#define MIN_CHUNK_SIZE 1024
#define MAX_CHUNK_SIZE (1024*1024)

#if MAX_CHUNK_SIZE > (AFTER_TRIGGER_OFFSET+1)
#error MAX_CHUNK_SIZE must not exceed AFTER_TRIGGER_OFFSET
#endif

		if (chunk == NULL)
			chunksize = MIN_CHUNK_SIZE;
		else
		{
			/* preceding chunk size... */
			chunksize = chunk->endptr - (char *) chunk;
			/* check number of shared records in preceding chunk */
			if ((chunk->endptr - chunk->endfree) <=
				(100 * sizeof(AfterTriggerSharedData)))
				chunksize *= 2; /* okay, double it */
			else
				chunksize /= 2; /* too many shared records */
			chunksize = Min(chunksize, MAX_CHUNK_SIZE);
		}
		chunk = MemoryContextAlloc(afterTriggers.event_cxt, chunksize);
		chunk->next = NULL;
		chunk->freeptr = CHUNK_DATA_START(chunk);
		chunk->endptr = chunk->endfree = (char *) chunk + chunksize;
		Assert(chunk->endfree - chunk->freeptr >= needed);

		/* Link the new chunk at the tail of the list */
		if (events->tail == NULL)
		{
			Assert(events->head == NULL);
			events->head = chunk;
		}
		else
			events->tail->next = chunk;
		events->tail = chunk;
		/* events->tailfree is now out of sync, but we'll fix it below */
	}

	/*
	 * Try to locate a matching shared-data record already in the chunk.  If
	 * none, make a new one.  (Shared records are laid out backwards from the
	 * chunk's end, so we scan from endptr down to endfree.)  A record matches
	 * only if all identifying fields are equal and it has not yet been
	 * marked for firing (ats_firing_id == 0).
	 */
	for (newshared = ((AfterTriggerShared) chunk->endptr) - 1;
		 (char *) newshared >= chunk->endfree;
		 newshared--)
	{
		if (newshared->ats_tgoid == evtshared->ats_tgoid &&
			newshared->ats_relid == evtshared->ats_relid &&
			newshared->ats_event == evtshared->ats_event &&
			newshared->ats_table == evtshared->ats_table &&
			newshared->ats_firing_id == 0)
			break;
	}
	if ((char *) newshared < chunk->endfree)
	{
		/* No match found: carve a new shared record off the chunk's end */
		*newshared = *evtshared;
		newshared->ats_firing_id = 0;	/* just to be sure */
		chunk->endfree = (char *) newshared;
	}

	/* Insert the data */
	newevent = (AfterTriggerEvent) chunk->freeptr;
	memcpy(newevent, event, eventsize);
	/* ... and link the new event to its shared record */
	newevent->ate_flags &= ~AFTER_TRIGGER_OFFSET;
	newevent->ate_flags |= (char *) newshared - (char *) newevent;

	chunk->freeptr += eventsize;
	events->tailfree = chunk->freeptr;
}
4143 :
4144 : /* ----------
4145 : * afterTriggerFreeEventList()
4146 : *
4147 : * Free all the event storage in the given list.
4148 : * ----------
4149 : */
4150 : static void
4151 16938 : afterTriggerFreeEventList(AfterTriggerEventList *events)
4152 : {
4153 : AfterTriggerEventChunk *chunk;
4154 :
4155 23224 : while ((chunk = events->head) != NULL)
4156 : {
4157 6286 : events->head = chunk->next;
4158 6286 : pfree(chunk);
4159 : }
4160 16938 : events->tail = NULL;
4161 16938 : events->tailfree = NULL;
4162 16938 : }
4163 :
/* ----------
 * afterTriggerRestoreEventList()
 *
 *	Restore an event list to its prior length, removing all the events
 *	added since it had the value old_events.
 * ----------
 */
static void
afterTriggerRestoreEventList(AfterTriggerEventList *events,
							 const AfterTriggerEventList *old_events)
{
	AfterTriggerEventChunk *chunk;
	AfterTriggerEventChunk *next_chunk;

	if (old_events->tail == NULL)
	{
		/* restoring to a completely empty state, so free everything */
		afterTriggerFreeEventList(events);
	}
	else
	{
		/* restore the saved head/tail/tailfree pointers in one struct copy */
		*events = *old_events;
		/* free any chunks after the last one we want to keep */
		for (chunk = events->tail->next; chunk != NULL; chunk = next_chunk)
		{
			next_chunk = chunk->next;
			pfree(chunk);
		}
		/* and clean up the tail chunk to be the right length */
		events->tail->next = NULL;
		events->tail->freeptr = events->tailfree;

		/*
		 * We don't make any effort to remove now-unused shared data records.
		 * They might still be useful, anyway.
		 */
	}
}
4202 :
4203 : /* ----------
4204 : * afterTriggerDeleteHeadEventChunk()
4205 : *
4206 : * Remove the first chunk of events from the query level's event list.
4207 : * Keep any event list pointers elsewhere in the query level's data
4208 : * structures in sync.
4209 : * ----------
4210 : */
4211 : static void
4212 0 : afterTriggerDeleteHeadEventChunk(AfterTriggersQueryData *qs)
4213 : {
4214 0 : AfterTriggerEventChunk *target = qs->events.head;
4215 : ListCell *lc;
4216 :
4217 : Assert(target && target->next);
4218 :
4219 : /*
4220 : * First, update any pointers in the per-table data, so that they won't be
4221 : * dangling. Resetting obsoleted pointers to NULL will make
4222 : * cancel_prior_stmt_triggers start from the list head, which is fine.
4223 : */
4224 0 : foreach(lc, qs->tables)
4225 : {
4226 0 : AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
4227 :
4228 0 : if (table->after_trig_done &&
4229 0 : table->after_trig_events.tail == target)
4230 : {
4231 0 : table->after_trig_events.head = NULL;
4232 0 : table->after_trig_events.tail = NULL;
4233 0 : table->after_trig_events.tailfree = NULL;
4234 : }
4235 : }
4236 :
4237 : /* Now we can flush the head chunk */
4238 0 : qs->events.head = target->next;
4239 0 : pfree(target);
4240 0 : }
4241 :
4242 :
4243 : /* ----------
4244 : * AfterTriggerExecute()
4245 : *
4246 : * Fetch the required tuples back from the heap and fire one
4247 : * single trigger function.
4248 : *
4249 : * Frequently, this will be fired many times in a row for triggers of
4250 : * a single relation. Therefore, we cache the open relation and provide
4251 : * fmgr lookup cache space at the caller level. (For triggers fired at
4252 : * the end of a query, we can even piggyback on the executor's state.)
4253 : *
4254 : * When fired for a cross-partition update of a partitioned table, the old
4255 : * tuple is fetched using 'src_relInfo' (the source leaf partition) and
4256 : * the new tuple using 'dst_relInfo' (the destination leaf partition), though
4257 : * both are converted into the root partitioned table's format before passing
4258 : * to the trigger function.
4259 : *
4260 : * event: event currently being fired.
4261 : * relInfo: result relation for event.
4262 : * src_relInfo: source partition of a cross-partition update
4263 : * dst_relInfo: its destination partition
4264 : * trigdesc: working copy of rel's trigger info.
4265 : * finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
4266 : * instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
4267 : * or NULL if no instrumentation is wanted.
4268 : * per_tuple_context: memory context to call trigger function in.
4269 : * trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
4270 : * trig_tuple_slot2: scratch slot for tg_newtuple (foreign tables only)
4271 : * ----------
4272 : */
4273 : static void
4274 11450 : AfterTriggerExecute(EState *estate,
4275 : AfterTriggerEvent event,
4276 : ResultRelInfo *relInfo,
4277 : ResultRelInfo *src_relInfo,
4278 : ResultRelInfo *dst_relInfo,
4279 : TriggerDesc *trigdesc,
4280 : FmgrInfo *finfo, Instrumentation *instr,
4281 : MemoryContext per_tuple_context,
4282 : TupleTableSlot *trig_tuple_slot1,
4283 : TupleTableSlot *trig_tuple_slot2)
4284 : {
4285 11450 : Relation rel = relInfo->ri_RelationDesc;
4286 11450 : Relation src_rel = src_relInfo->ri_RelationDesc;
4287 11450 : Relation dst_rel = dst_relInfo->ri_RelationDesc;
4288 11450 : AfterTriggerShared evtshared = GetTriggerSharedData(event);
4289 11450 : Oid tgoid = evtshared->ats_tgoid;
4290 11450 : TriggerData LocTriggerData = {0};
4291 : HeapTuple rettuple;
4292 : int tgindx;
4293 11450 : bool should_free_trig = false;
4294 11450 : bool should_free_new = false;
4295 :
4296 : /*
4297 : * Locate trigger in trigdesc. It might not be present, and in fact the
4298 : * trigdesc could be NULL, if the trigger was dropped since the event was
4299 : * queued. In that case, silently do nothing.
4300 : */
4301 11450 : if (trigdesc == NULL)
4302 6 : return;
4303 25574 : for (tgindx = 0; tgindx < trigdesc->numtriggers; tgindx++)
4304 : {
4305 25574 : if (trigdesc->triggers[tgindx].tgoid == tgoid)
4306 : {
4307 11444 : LocTriggerData.tg_trigger = &(trigdesc->triggers[tgindx]);
4308 11444 : break;
4309 : }
4310 : }
4311 11444 : if (LocTriggerData.tg_trigger == NULL)
4312 0 : return;
4313 :
4314 : /*
4315 : * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
4316 : * to include time spent re-fetching tuples in the trigger cost.
4317 : */
4318 11444 : if (instr)
4319 0 : InstrStartNode(instr + tgindx);
4320 :
4321 : /*
4322 : * Fetch the required tuple(s).
4323 : */
4324 11444 : switch (event->ate_flags & AFTER_TRIGGER_TUP_BITS)
4325 : {
4326 50 : case AFTER_TRIGGER_FDW_FETCH:
4327 : {
4328 50 : Tuplestorestate *fdw_tuplestore = GetCurrentFDWTuplestore();
4329 :
4330 50 : if (!tuplestore_gettupleslot(fdw_tuplestore, true, false,
4331 : trig_tuple_slot1))
4332 0 : elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4333 :
4334 50 : if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4335 18 : TRIGGER_EVENT_UPDATE &&
4336 18 : !tuplestore_gettupleslot(fdw_tuplestore, true, false,
4337 : trig_tuple_slot2))
4338 0 : elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4339 : }
4340 : /* fall through */
4341 : case AFTER_TRIGGER_FDW_REUSE:
4342 :
4343 : /*
4344 : * Store tuple in the slot so that tg_trigtuple does not reference
4345 : * tuplestore memory. (It is formally possible for the trigger
4346 : * function to queue trigger events that add to the same
4347 : * tuplestore, which can push other tuples out of memory.) The
4348 : * distinction is academic, because we start with a minimal tuple
4349 : * that is stored as a heap tuple, constructed in different memory
4350 : * context, in the slot anyway.
4351 : */
4352 58 : LocTriggerData.tg_trigslot = trig_tuple_slot1;
4353 58 : LocTriggerData.tg_trigtuple =
4354 58 : ExecFetchSlotHeapTuple(trig_tuple_slot1, true, &should_free_trig);
4355 :
4356 58 : if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) ==
4357 : TRIGGER_EVENT_UPDATE)
4358 : {
4359 22 : LocTriggerData.tg_newslot = trig_tuple_slot2;
4360 22 : LocTriggerData.tg_newtuple =
4361 22 : ExecFetchSlotHeapTuple(trig_tuple_slot2, true, &should_free_new);
4362 : }
4363 : else
4364 : {
4365 36 : LocTriggerData.tg_newtuple = NULL;
4366 : }
4367 58 : break;
4368 :
4369 11386 : default:
4370 11386 : if (ItemPointerIsValid(&(event->ate_ctid1)))
4371 : {
4372 10382 : TupleTableSlot *src_slot = ExecGetTriggerOldSlot(estate,
4373 : src_relInfo);
4374 :
4375 10382 : if (!table_tuple_fetch_row_version(src_rel,
4376 : &(event->ate_ctid1),
4377 : SnapshotAny,
4378 : src_slot))
4379 0 : elog(ERROR, "failed to fetch tuple1 for AFTER trigger");
4380 :
4381 : /*
4382 : * Store the tuple fetched from the source partition into the
4383 : * target (root partitioned) table slot, converting if needed.
4384 : */
4385 10382 : if (src_relInfo != relInfo)
4386 : {
4387 144 : TupleConversionMap *map = ExecGetChildToRootMap(src_relInfo);
4388 :
4389 144 : LocTriggerData.tg_trigslot = ExecGetTriggerOldSlot(estate, relInfo);
4390 144 : if (map)
4391 : {
4392 36 : execute_attr_map_slot(map->attrMap,
4393 : src_slot,
4394 : LocTriggerData.tg_trigslot);
4395 : }
4396 : else
4397 108 : ExecCopySlot(LocTriggerData.tg_trigslot, src_slot);
4398 : }
4399 : else
4400 10238 : LocTriggerData.tg_trigslot = src_slot;
4401 10382 : LocTriggerData.tg_trigtuple =
4402 10382 : ExecFetchSlotHeapTuple(LocTriggerData.tg_trigslot, false, &should_free_trig);
4403 : }
4404 : else
4405 : {
4406 1004 : LocTriggerData.tg_trigtuple = NULL;
4407 : }
4408 :
4409 : /* don't touch ctid2 if not there */
4410 11386 : if (((event->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ||
4411 11530 : (event->ate_flags & AFTER_TRIGGER_CP_UPDATE)) &&
4412 3080 : ItemPointerIsValid(&(event->ate_ctid2)))
4413 3080 : {
4414 3080 : TupleTableSlot *dst_slot = ExecGetTriggerNewSlot(estate,
4415 : dst_relInfo);
4416 :
4417 3080 : if (!table_tuple_fetch_row_version(dst_rel,
4418 : &(event->ate_ctid2),
4419 : SnapshotAny,
4420 : dst_slot))
4421 0 : elog(ERROR, "failed to fetch tuple2 for AFTER trigger");
4422 :
4423 : /*
4424 : * Store the tuple fetched from the destination partition into
4425 : * the target (root partitioned) table slot, converting if
4426 : * needed.
4427 : */
4428 3080 : if (dst_relInfo != relInfo)
4429 : {
4430 144 : TupleConversionMap *map = ExecGetChildToRootMap(dst_relInfo);
4431 :
4432 144 : LocTriggerData.tg_newslot = ExecGetTriggerNewSlot(estate, relInfo);
4433 144 : if (map)
4434 : {
4435 36 : execute_attr_map_slot(map->attrMap,
4436 : dst_slot,
4437 : LocTriggerData.tg_newslot);
4438 : }
4439 : else
4440 108 : ExecCopySlot(LocTriggerData.tg_newslot, dst_slot);
4441 : }
4442 : else
4443 2936 : LocTriggerData.tg_newslot = dst_slot;
4444 3080 : LocTriggerData.tg_newtuple =
4445 3080 : ExecFetchSlotHeapTuple(LocTriggerData.tg_newslot, false, &should_free_new);
4446 : }
4447 : else
4448 : {
4449 8306 : LocTriggerData.tg_newtuple = NULL;
4450 : }
4451 : }
4452 :
4453 : /*
4454 : * Set up the tuplestore information to let the trigger have access to
4455 : * transition tables. When we first make a transition table available to
4456 : * a trigger, mark it "closed" so that it cannot change anymore. If any
4457 : * additional events of the same type get queued in the current trigger
4458 : * query level, they'll go into new transition tables.
4459 : */
4460 11444 : LocTriggerData.tg_oldtable = LocTriggerData.tg_newtable = NULL;
4461 11444 : if (evtshared->ats_table)
4462 : {
4463 534 : if (LocTriggerData.tg_trigger->tgoldtable)
4464 : {
4465 300 : if (TRIGGER_FIRED_BY_UPDATE(evtshared->ats_event))
4466 156 : LocTriggerData.tg_oldtable = evtshared->ats_table->old_upd_tuplestore;
4467 : else
4468 144 : LocTriggerData.tg_oldtable = evtshared->ats_table->old_del_tuplestore;
4469 300 : evtshared->ats_table->closed = true;
4470 : }
4471 :
4472 534 : if (LocTriggerData.tg_trigger->tgnewtable)
4473 : {
4474 384 : if (TRIGGER_FIRED_BY_INSERT(evtshared->ats_event))
4475 210 : LocTriggerData.tg_newtable = evtshared->ats_table->new_ins_tuplestore;
4476 : else
4477 174 : LocTriggerData.tg_newtable = evtshared->ats_table->new_upd_tuplestore;
4478 384 : evtshared->ats_table->closed = true;
4479 : }
4480 : }
4481 :
4482 : /*
4483 : * Setup the remaining trigger information
4484 : */
4485 11444 : LocTriggerData.type = T_TriggerData;
4486 11444 : LocTriggerData.tg_event =
4487 11444 : evtshared->ats_event & (TRIGGER_EVENT_OPMASK | TRIGGER_EVENT_ROW);
4488 11444 : LocTriggerData.tg_relation = rel;
4489 11444 : if (TRIGGER_FOR_UPDATE(LocTriggerData.tg_trigger->tgtype))
4490 5362 : LocTriggerData.tg_updatedcols = evtshared->ats_modifiedcols;
4491 :
4492 11444 : MemoryContextReset(per_tuple_context);
4493 :
4494 : /*
4495 : * Call the trigger and throw away any possibly returned updated tuple.
4496 : * (Don't let ExecCallTriggerFunc measure EXPLAIN time.)
4497 : */
4498 11444 : rettuple = ExecCallTriggerFunc(&LocTriggerData,
4499 : tgindx,
4500 : finfo,
4501 : NULL,
4502 : per_tuple_context);
4503 10208 : if (rettuple != NULL &&
4504 3340 : rettuple != LocTriggerData.tg_trigtuple &&
4505 1410 : rettuple != LocTriggerData.tg_newtuple)
4506 0 : heap_freetuple(rettuple);
4507 :
4508 : /*
4509 : * Release resources
4510 : */
4511 10208 : if (should_free_trig)
4512 172 : heap_freetuple(LocTriggerData.tg_trigtuple);
4513 10208 : if (should_free_new)
4514 136 : heap_freetuple(LocTriggerData.tg_newtuple);
4515 :
4516 : /* don't clear slots' contents if foreign table */
4517 10208 : if (trig_tuple_slot1 == NULL)
4518 : {
4519 10138 : if (LocTriggerData.tg_trigslot)
4520 9188 : ExecClearTuple(LocTriggerData.tg_trigslot);
4521 10138 : if (LocTriggerData.tg_newslot)
4522 2740 : ExecClearTuple(LocTriggerData.tg_newslot);
4523 : }
4524 :
4525 : /*
4526 : * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
4527 : * one "tuple returned" (really the number of firings).
4528 : */
4529 10208 : if (instr)
4530 0 : InstrStopNode(instr + tgindx, 1);
4531 : }
4532 :
4533 :
4534 : /*
4535 : * afterTriggerMarkEvents()
4536 : *
4537 : * Scan the given event list for not yet invoked events. Mark the ones
4538 : * that can be invoked now with the current firing ID.
4539 : *
4540 : * If move_list isn't NULL, events that are not to be invoked now are
4541 : * transferred to move_list.
4542 : *
4543 : * When immediate_only is true, do not invoke currently-deferred triggers.
4544 : * (This will be false only at main transaction exit.)
4545 : *
4546 : * Returns true if any invokable events were found.
4547 : */
static bool
afterTriggerMarkEvents(AfterTriggerEventList *events,
					   AfterTriggerEventList *move_list,
					   bool immediate_only)
{
	bool		found = false;
	bool		deferred_found = false;
	AfterTriggerEvent event;
	AfterTriggerEventChunk *chunk;

	/* Walk every event in every chunk of the list. */
	for_each_event_chunk(event, chunk, *events)
	{
		AfterTriggerShared evtshared = GetTriggerSharedData(event);
		bool		defer_it = false;

		/* Only consider events that are neither fired nor scheduled. */
		if (!(event->ate_flags &
			  (AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS)))
		{
			/*
			 * This trigger hasn't been called or scheduled yet. Check if we
			 * should call it now.
			 */
			if (immediate_only && afterTriggerCheckState(evtshared))
			{
				/* currently deferred; leave unmarked, maybe move it below */
				defer_it = true;
			}
			else
			{
				/*
				 * Mark it as to be fired in this firing cycle.  The firing ID
				 * comes from the global counter; afterTriggerInvokeEvents
				 * will fire only events tagged with the matching ID.
				 */
				evtshared->ats_firing_id = afterTriggers.firing_counter;
				event->ate_flags |= AFTER_TRIGGER_IN_PROGRESS;
				found = true;
			}
		}

		/*
		 * If it's deferred, move it to move_list, if requested.
		 */
		if (defer_it && move_list != NULL)
		{
			deferred_found = true;
			/* add it to move_list */
			afterTriggerAddEvent(move_list, event, evtshared);
			/* mark original copy "done" so we don't do it again */
			event->ate_flags |= AFTER_TRIGGER_DONE;
		}
	}

	/*
	 * We could allow deferred triggers if, before the end of the
	 * security-restricted operation, we were to verify that a SET CONSTRAINTS
	 * ... IMMEDIATE has fired all such triggers.  For now, don't bother.
	 */
	if (deferred_found && InSecurityRestrictedOperation())
		ereport(ERROR,
				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
				 errmsg("cannot fire deferred trigger within security-restricted operation")));

	return found;
}
4610 :
4611 : /*
4612 : * afterTriggerInvokeEvents()
4613 : *
4614 : * Scan the given event list for events that are marked as to be fired
4615 : * in the current firing cycle, and fire them.
4616 : *
4617 : * If estate isn't NULL, we use its result relation info to avoid repeated
4618 : * openings and closing of trigger target relations. If it is NULL, we
4619 : * make one locally to cache the info in case there are multiple trigger
4620 : * events per rel.
4621 : *
4622 : * When delete_ok is true, it's safe to delete fully-processed events.
4623 : * (We are not very tense about that: we simply reset a chunk to be empty
4624 : * if all its events got fired. The objective here is just to avoid useless
4625 : * rescanning of events when a trigger queues new events during transaction
4626 : * end, so it's not necessary to worry much about the case where only
4627 : * some events are fired.)
4628 : *
4629 : * Returns true if no unfired events remain in the list (this allows us
4630 : * to avoid repeating afterTriggerMarkEvents).
4631 : */
static bool
afterTriggerInvokeEvents(AfterTriggerEventList *events,
						 CommandId firing_id,
						 EState *estate,
						 bool delete_ok)
{
	bool		all_fired = true;
	AfterTriggerEventChunk *chunk;
	MemoryContext per_tuple_context;
	bool		local_estate = false;
	/* Cached per-relation lookup state, reused while successive events hit
	 * the same relation; refreshed whenever the event's relid changes. */
	ResultRelInfo *rInfo = NULL;
	Relation	rel = NULL;
	TriggerDesc *trigdesc = NULL;
	FmgrInfo   *finfo = NULL;
	Instrumentation *instr = NULL;
	/* slot1/slot2 are only allocated for foreign-table relations, which
	 * fetch their tuples from an FDW tuplestore rather than by ctid. */
	TupleTableSlot *slot1 = NULL,
			   *slot2 = NULL;

	/* Make a local EState if need be */
	if (estate == NULL)
	{
		estate = CreateExecutorState();
		local_estate = true;
	}

	/* Make a per-tuple memory context for trigger function calls */
	per_tuple_context =
		AllocSetContextCreate(CurrentMemoryContext,
							  "AfterTriggerTupleContext",
							  ALLOCSET_DEFAULT_SIZES);

	for_each_chunk(chunk, *events)
	{
		AfterTriggerEvent event;
		bool		all_fired_in_chunk = true;

		for_each_event(event, chunk)
		{
			AfterTriggerShared evtshared = GetTriggerSharedData(event);

			/*
			 * Is it one for me to fire?  Only events marked IN_PROGRESS with
			 * our firing ID (set by afterTriggerMarkEvents) qualify.
			 */
			if ((event->ate_flags & AFTER_TRIGGER_IN_PROGRESS) &&
				evtshared->ats_firing_id == firing_id)
			{
				ResultRelInfo *src_rInfo,
						   *dst_rInfo;

				/*
				 * So let's fire it... but first, find the correct relation if
				 * this is not the same relation as before.
				 */
				if (rel == NULL || RelationGetRelid(rel) != evtshared->ats_relid)
				{
					rInfo = ExecGetTriggerResultRel(estate, evtshared->ats_relid,
													NULL);
					rel = rInfo->ri_RelationDesc;
					/* Catch calls with insufficient relcache refcounting */
					Assert(!RelationHasReferenceCountZero(rel));
					trigdesc = rInfo->ri_TrigDesc;
					/* caution: trigdesc could be NULL here */
					finfo = rInfo->ri_TrigFunctions;
					instr = rInfo->ri_TrigInstrument;
					/* drop any foreign-table slots built for the prior rel */
					if (slot1 != NULL)
					{
						ExecDropSingleTupleTableSlot(slot1);
						ExecDropSingleTupleTableSlot(slot2);
						slot1 = slot2 = NULL;
					}
					if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
					{
						slot1 = MakeSingleTupleTableSlot(rel->rd_att,
														 &TTSOpsMinimalTuple);
						slot2 = MakeSingleTupleTableSlot(rel->rd_att,
														 &TTSOpsMinimalTuple);
					}
				}

				/*
				 * Look up source and destination partition result rels of a
				 * cross-partition update event.
				 */
				if ((event->ate_flags & AFTER_TRIGGER_TUP_BITS) ==
					AFTER_TRIGGER_CP_UPDATE)
				{
					Assert(OidIsValid(event->ate_src_part) &&
						   OidIsValid(event->ate_dst_part));
					src_rInfo = ExecGetTriggerResultRel(estate,
														event->ate_src_part,
														rInfo);
					dst_rInfo = ExecGetTriggerResultRel(estate,
														event->ate_dst_part,
														rInfo);
				}
				else
					src_rInfo = dst_rInfo = rInfo;

				/*
				 * Fire it.  Note that the AFTER_TRIGGER_IN_PROGRESS flag is
				 * still set, so recursive examinations of the event list
				 * won't try to re-fire it.
				 */
				AfterTriggerExecute(estate, event, rInfo,
									src_rInfo, dst_rInfo,
									trigdesc, finfo, instr,
									per_tuple_context, slot1, slot2);

				/*
				 * Mark the event as done.
				 */
				event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
				event->ate_flags |= AFTER_TRIGGER_DONE;
			}
			else if (!(event->ate_flags & AFTER_TRIGGER_DONE))
			{
				/* something remains to be done */
				all_fired = all_fired_in_chunk = false;
			}
		}

		/* Clear the chunk if delete_ok and nothing left of interest */
		if (delete_ok && all_fired_in_chunk)
		{
			chunk->freeptr = CHUNK_DATA_START(chunk);
			chunk->endfree = chunk->endptr;

			/*
			 * If it's last chunk, must sync event list's tailfree too.  Note
			 * that delete_ok must NOT be passed as true if there could be
			 * additional AfterTriggerEventList values pointing at this event
			 * list, since we'd fail to fix their copies of tailfree.
			 */
			if (chunk == events->tail)
				events->tailfree = chunk->freeptr;
		}
	}
	/* Drop leftover foreign-table slots, if any were built. */
	if (slot1 != NULL)
	{
		ExecDropSingleTupleTableSlot(slot1);
		ExecDropSingleTupleTableSlot(slot2);
	}

	/* Release working resources */
	MemoryContextDelete(per_tuple_context);

	if (local_estate)
	{
		/* we created the EState above, so tear it down completely */
		ExecCloseResultRelations(estate);
		ExecResetTupleTable(estate->es_tupleTable, false);
		FreeExecutorState(estate);
	}

	return all_fired;
}
4787 :
4788 :
4789 : /*
4790 : * GetAfterTriggersTableData
4791 : *
4792 : * Find or create an AfterTriggersTableData struct for the specified
4793 : * trigger event (relation + operation type). Ignore existing structs
4794 : * marked "closed"; we don't want to put any additional tuples into them,
4795 : * nor change their stmt-triggers-fired state.
4796 : *
4797 : * Note: the AfterTriggersTableData list is allocated in the current
4798 : * (sub)transaction's CurTransactionContext. This is OK because
4799 : * we don't need it to live past AfterTriggerEndQuery.
4800 : */
4801 : static AfterTriggersTableData *
4802 2112 : GetAfterTriggersTableData(Oid relid, CmdType cmdType)
4803 : {
4804 : AfterTriggersTableData *table;
4805 : AfterTriggersQueryData *qs;
4806 : MemoryContext oldcxt;
4807 : ListCell *lc;
4808 :
4809 : /* Caller should have ensured query_depth is OK. */
4810 : Assert(afterTriggers.query_depth >= 0 &&
4811 : afterTriggers.query_depth < afterTriggers.maxquerydepth);
4812 2112 : qs = &afterTriggers.query_stack[afterTriggers.query_depth];
4813 :
4814 2460 : foreach(lc, qs->tables)
4815 : {
4816 1408 : table = (AfterTriggersTableData *) lfirst(lc);
4817 1408 : if (table->relid == relid && table->cmdType == cmdType &&
4818 1096 : !table->closed)
4819 1060 : return table;
4820 : }
4821 :
4822 1052 : oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4823 :
4824 1052 : table = (AfterTriggersTableData *) palloc0(sizeof(AfterTriggersTableData));
4825 1052 : table->relid = relid;
4826 1052 : table->cmdType = cmdType;
4827 1052 : qs->tables = lappend(qs->tables, table);
4828 :
4829 1052 : MemoryContextSwitchTo(oldcxt);
4830 :
4831 1052 : return table;
4832 : }
4833 :
4834 : /*
4835 : * Returns a TupleTableSlot suitable for holding the tuples to be put
4836 : * into AfterTriggersTableData's transition table tuplestores.
4837 : */
4838 : static TupleTableSlot *
4839 294 : GetAfterTriggersStoreSlot(AfterTriggersTableData *table,
4840 : TupleDesc tupdesc)
4841 : {
4842 : /* Create it if not already done. */
4843 294 : if (!table->storeslot)
4844 : {
4845 : MemoryContext oldcxt;
4846 :
4847 : /*
4848 : * We need this slot only until AfterTriggerEndQuery, but making it
4849 : * last till end-of-subxact is good enough. It'll be freed by
4850 : * AfterTriggerFreeQuery(). However, the passed-in tupdesc might have
4851 : * a different lifespan, so we'd better make a copy of that.
4852 : */
4853 84 : oldcxt = MemoryContextSwitchTo(CurTransactionContext);
4854 84 : tupdesc = CreateTupleDescCopy(tupdesc);
4855 84 : table->storeslot = MakeSingleTupleTableSlot(tupdesc, &TTSOpsVirtual);
4856 84 : MemoryContextSwitchTo(oldcxt);
4857 : }
4858 :
4859 294 : return table->storeslot;
4860 : }
4861 :
4862 : /*
4863 : * MakeTransitionCaptureState
4864 : *
4865 : * Make a TransitionCaptureState object for the given TriggerDesc, target
4866 : * relation, and operation type. The TCS object holds all the state needed
4867 : * to decide whether to capture tuples in transition tables.
4868 : *
4869 : * If there are no triggers in 'trigdesc' that request relevant transition
4870 : * tables, then return NULL.
4871 : *
4872 : * The resulting object can be passed to the ExecAR* functions. When
4873 : * dealing with child tables, the caller can set tcs_original_insert_tuple
4874 : * to avoid having to reconstruct the original tuple in the root table's
4875 : * format.
4876 : *
4877 : * Note that we copy the flags from a parent table into this struct (rather
4878 : * than subsequently using the relation's TriggerDesc directly) so that we can
4879 : * use it to control collection of transition tuples from child tables.
4880 : *
4881 : * Per SQL spec, all operations of the same kind (INSERT/UPDATE/DELETE)
4882 : * on the same table during one query should share one transition table.
4883 : * Therefore, the Tuplestores are owned by an AfterTriggersTableData struct
4884 : * looked up using the table OID + CmdType, and are merely referenced by
4885 : * the TransitionCaptureState objects we hand out to callers.
4886 : */
TransitionCaptureState *
MakeTransitionCaptureState(TriggerDesc *trigdesc, Oid relid, CmdType cmdType)
{
	TransitionCaptureState *state;
	bool		need_old_upd,
				need_new_upd,
				need_old_del,
				need_new_ins;
	AfterTriggersTableData *table;
	MemoryContext oldcxt;
	ResourceOwner saveResourceOwner;

	/* No triggers at all means no transition tables. */
	if (trigdesc == NULL)
		return NULL;

	/* Detect which table(s) we need. */
	switch (cmdType)
	{
		case CMD_INSERT:
			need_old_upd = need_old_del = need_new_upd = false;
			need_new_ins = trigdesc->trig_insert_new_table;
			break;
		case CMD_UPDATE:
			need_old_upd = trigdesc->trig_update_old_table;
			need_new_upd = trigdesc->trig_update_new_table;
			need_old_del = need_new_ins = false;
			break;
		case CMD_DELETE:
			need_old_del = trigdesc->trig_delete_old_table;
			need_old_upd = need_new_upd = need_new_ins = false;
			break;
		case CMD_MERGE:
			/* MERGE can insert, update, or delete, so may need all four */
			need_old_upd = trigdesc->trig_update_old_table;
			need_new_upd = trigdesc->trig_update_new_table;
			need_old_del = trigdesc->trig_delete_old_table;
			need_new_ins = trigdesc->trig_insert_new_table;
			break;
		default:
			elog(ERROR, "unexpected CmdType: %d", (int) cmdType);
			/* keep compiler quiet */
			need_old_upd = need_new_upd = need_old_del = need_new_ins = false;
			break;
	}
	if (!need_old_upd && !need_new_upd && !need_new_ins && !need_old_del)
		return NULL;

	/* Check state, like AfterTriggerSaveEvent. */
	if (afterTriggers.query_depth < 0)
		elog(ERROR, "MakeTransitionCaptureState() called outside of query");

	/* Be sure we have enough space to record events at this query depth. */
	if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
		AfterTriggerEnlargeQueryState();

	/*
	 * Find or create an AfterTriggersTableData struct to hold the
	 * tuplestore(s).  If there's a matching struct but it's marked closed,
	 * ignore it; we need a newer one.
	 *
	 * Note: the AfterTriggersTableData list, as well as the tuplestores, are
	 * allocated in the current (sub)transaction's CurTransactionContext, and
	 * the tuplestores are managed by the (sub)transaction's resource owner.
	 * This is sufficient lifespan because we do not allow triggers using
	 * transition tables to be deferrable; they will be fired during
	 * AfterTriggerEndQuery, after which it's okay to delete the data.
	 */
	table = GetAfterTriggersTableData(relid, cmdType);

	/* Now create required tuplestore(s), if we don't have them already. */
	oldcxt = MemoryContextSwitchTo(CurTransactionContext);
	saveResourceOwner = CurrentResourceOwner;
	CurrentResourceOwner = CurTransactionResourceOwner;

	if (need_old_upd && table->old_upd_tuplestore == NULL)
		table->old_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
	if (need_new_upd && table->new_upd_tuplestore == NULL)
		table->new_upd_tuplestore = tuplestore_begin_heap(false, false, work_mem);
	if (need_old_del && table->old_del_tuplestore == NULL)
		table->old_del_tuplestore = tuplestore_begin_heap(false, false, work_mem);
	if (need_new_ins && table->new_ins_tuplestore == NULL)
		table->new_ins_tuplestore = tuplestore_begin_heap(false, false, work_mem);

	/* Restore the caller's resource owner and memory context. */
	CurrentResourceOwner = saveResourceOwner;
	MemoryContextSwitchTo(oldcxt);

	/* Now build the TransitionCaptureState struct, in caller's context */
	state = (TransitionCaptureState *) palloc0(sizeof(TransitionCaptureState));
	state->tcs_delete_old_table = trigdesc->trig_delete_old_table;
	state->tcs_update_old_table = trigdesc->trig_update_old_table;
	state->tcs_update_new_table = trigdesc->trig_update_new_table;
	state->tcs_insert_new_table = trigdesc->trig_insert_new_table;
	state->tcs_private = table;

	return state;
}
4982 :
4983 :
4984 : /* ----------
4985 : * AfterTriggerBeginXact()
4986 : *
4987 : * Called at transaction start (either BEGIN or implicit for single
4988 : * statement outside of transaction block).
4989 : * ----------
4990 : */
4991 : void
4992 748364 : AfterTriggerBeginXact(void)
4993 : {
4994 : /*
4995 : * Initialize after-trigger state structure to empty
4996 : */
4997 748364 : afterTriggers.firing_counter = (CommandId) 1; /* mustn't be 0 */
4998 748364 : afterTriggers.query_depth = -1;
4999 :
5000 : /*
5001 : * Verify that there is no leftover state remaining. If these assertions
5002 : * trip, it means that AfterTriggerEndXact wasn't called or didn't clean
5003 : * up properly.
5004 : */
5005 : Assert(afterTriggers.state == NULL);
5006 : Assert(afterTriggers.query_stack == NULL);
5007 : Assert(afterTriggers.maxquerydepth == 0);
5008 : Assert(afterTriggers.event_cxt == NULL);
5009 : Assert(afterTriggers.events.head == NULL);
5010 : Assert(afterTriggers.trans_stack == NULL);
5011 : Assert(afterTriggers.maxtransdepth == 0);
5012 748364 : }
5013 :
5014 :
5015 : /* ----------
5016 : * AfterTriggerBeginQuery()
5017 : *
5018 : * Called just before we start processing a single query within a
5019 : * transaction (or subtransaction). Most of the real work gets deferred
5020 : * until somebody actually tries to queue a trigger event.
5021 : * ----------
5022 : */
5023 : void
5024 418668 : AfterTriggerBeginQuery(void)
5025 : {
5026 : /* Increase the query stack depth */
5027 418668 : afterTriggers.query_depth++;
5028 418668 : }
5029 :
5030 :
5031 : /* ----------
5032 : * AfterTriggerEndQuery()
5033 : *
5034 : * Called after one query has been completely processed. At this time
5035 : * we invoke all AFTER IMMEDIATE trigger events queued by the query, and
5036 : * transfer deferred trigger events to the global deferred-trigger list.
5037 : *
5038 : * Note that this must be called BEFORE closing down the executor
5039 : * with ExecutorEnd, because we make use of the EState's info about
5040 : * target relations. Normally it is called from ExecutorFinish.
5041 : * ----------
5042 : */
void
AfterTriggerEndQuery(EState *estate)
{
	AfterTriggersQueryData *qs;

	/* Must be inside a query, too */
	Assert(afterTriggers.query_depth >= 0);

	/*
	 * If we never even got as far as initializing the event stack, there
	 * certainly won't be any events, so exit quickly.
	 */
	if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
	{
		afterTriggers.query_depth--;
		return;
	}

	/*
	 * Process all immediate-mode triggers queued by the query, and move the
	 * deferred ones to the main list of deferred events.
	 *
	 * Notice that we decide which ones will be fired, and put the deferred
	 * ones on the main list, before anything is actually fired.  This ensures
	 * reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
	 * IMMEDIATE: all events we have decided to defer will be available for it
	 * to fire.
	 *
	 * We loop in case a trigger queues more events at the same query level.
	 * Ordinary trigger functions, including all PL/pgSQL trigger functions,
	 * will instead fire any triggers in a dedicated query level.  Foreign key
	 * enforcement triggers do add to the current query level, thanks to their
	 * passing fire_triggers = false to SPI_execute_snapshot().  Other
	 * C-language triggers might do likewise.
	 *
	 * If we find no firable events, we don't have to increment
	 * firing_counter.
	 */
	qs = &afterTriggers.query_stack[afterTriggers.query_depth];

	for (;;)
	{
		if (afterTriggerMarkEvents(&qs->events, &afterTriggers.events, true))
		{
			/* take a fresh firing ID for this cycle */
			CommandId	firing_id = afterTriggers.firing_counter++;
			AfterTriggerEventChunk *oldtail = qs->events.tail;

			if (afterTriggerInvokeEvents(&qs->events, firing_id, estate, false))
				break;			/* all fired */

			/*
			 * Firing a trigger could result in query_stack being repalloc'd,
			 * so we must recalculate qs after each afterTriggerInvokeEvents
			 * call.  Furthermore, it's unsafe to pass delete_ok = true here,
			 * because that could cause afterTriggerInvokeEvents to try to
			 * access qs->events after the stack has been repalloc'd.
			 */
			qs = &afterTriggers.query_stack[afterTriggers.query_depth];

			/*
			 * We'll need to scan the events list again.  To reduce the cost
			 * of doing so, get rid of completely-fired chunks.  We know that
			 * all events were marked IN_PROGRESS or DONE at the conclusion of
			 * afterTriggerMarkEvents, so any still-interesting events must
			 * have been added after that, and so must be in the chunk that
			 * was then the tail chunk, or in later chunks.  So, zap all
			 * chunks before oldtail.  This is approximately the same set of
			 * events we would have gotten rid of by passing delete_ok = true.
			 */
			Assert(oldtail != NULL);
			while (qs->events.head != oldtail)
				afterTriggerDeleteHeadEventChunk(qs);
		}
		else
			break;				/* no firable events remain */
	}

	/* Release query-level-local storage, including tuplestores if any */
	AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);

	afterTriggers.query_depth--;
}
5125 :
5126 :
5127 : /*
5128 : * AfterTriggerFreeQuery
5129 : * Release subsidiary storage for a trigger query level.
5130 : * This includes closing down tuplestores.
5131 : * Note: it's important for this to be safe if interrupted by an error
5132 : * and then called again for the same query level.
5133 : */
5134 : static void
5135 7692 : AfterTriggerFreeQuery(AfterTriggersQueryData *qs)
5136 : {
5137 : Tuplestorestate *ts;
5138 : List *tables;
5139 : ListCell *lc;
5140 :
5141 : /* Drop the trigger events */
5142 7692 : afterTriggerFreeEventList(&qs->events);
5143 :
5144 : /* Drop FDW tuplestore if any */
5145 7692 : ts = qs->fdw_tuplestore;
5146 7692 : qs->fdw_tuplestore = NULL;
5147 7692 : if (ts)
5148 36 : tuplestore_end(ts);
5149 :
5150 : /* Release per-table subsidiary storage */
5151 7692 : tables = qs->tables;
5152 8692 : foreach(lc, tables)
5153 : {
5154 1000 : AfterTriggersTableData *table = (AfterTriggersTableData *) lfirst(lc);
5155 :
5156 1000 : ts = table->old_upd_tuplestore;
5157 1000 : table->old_upd_tuplestore = NULL;
5158 1000 : if (ts)
5159 150 : tuplestore_end(ts);
5160 1000 : ts = table->new_upd_tuplestore;
5161 1000 : table->new_upd_tuplestore = NULL;
5162 1000 : if (ts)
5163 156 : tuplestore_end(ts);
5164 1000 : ts = table->old_del_tuplestore;
5165 1000 : table->old_del_tuplestore = NULL;
5166 1000 : if (ts)
5167 120 : tuplestore_end(ts);
5168 1000 : ts = table->new_ins_tuplestore;
5169 1000 : table->new_ins_tuplestore = NULL;
5170 1000 : if (ts)
5171 204 : tuplestore_end(ts);
5172 1000 : if (table->storeslot)
5173 : {
5174 84 : TupleTableSlot *slot = table->storeslot;
5175 :
5176 84 : table->storeslot = NULL;
5177 84 : ExecDropSingleTupleTableSlot(slot);
5178 : }
5179 : }
5180 :
5181 : /*
5182 : * Now free the AfterTriggersTableData structs and list cells. Reset list
5183 : * pointer first; if list_free_deep somehow gets an error, better to leak
5184 : * that storage than have an infinite loop.
5185 : */
5186 7692 : qs->tables = NIL;
5187 7692 : list_free_deep(tables);
5188 7692 : }
5189 :
5190 :
5191 : /* ----------
5192 : * AfterTriggerFireDeferred()
5193 : *
5194 : * Called just before the current transaction is committed. At this
5195 : * time we invoke all pending DEFERRED triggers.
5196 : *
5197 : * It is possible for other modules to queue additional deferred triggers
5198 : * during pre-commit processing; therefore xact.c may have to call this
5199 : * multiple times.
5200 : * ----------
5201 : */
void
AfterTriggerFireDeferred(void)
{
	AfterTriggerEventList *events;
	bool		snap_pushed = false;

	/* Must not be inside a query */
	Assert(afterTriggers.query_depth == -1);

	/*
	 * If there are any triggers to fire, make sure we have set a snapshot for
	 * them to use.  (Since PortalRunUtility doesn't set a snap for COMMIT, we
	 * can't assume ActiveSnapshot is valid on entry.)
	 */
	events = &afterTriggers.events;
	if (events->head != NULL)
	{
		PushActiveSnapshot(GetTransactionSnapshot());
		snap_pushed = true;
	}

	/*
	 * Run all the remaining triggers.  Loop until they are all gone, in case
	 * some trigger queues more for us to do.  immediate_only is false here:
	 * at transaction end even deferred triggers must fire.
	 */
	while (afterTriggerMarkEvents(events, NULL, false))
	{
		CommandId	firing_id = afterTriggers.firing_counter++;

		/* delete_ok is safe here: no other list can point at these chunks */
		if (afterTriggerInvokeEvents(events, firing_id, NULL, true))
			break;				/* all fired */
	}

	/*
	 * We don't bother freeing the event list, since it will go away anyway
	 * (and more efficiently than via pfree) in AfterTriggerEndXact.
	 */

	if (snap_pushed)
		PopActiveSnapshot();
}
5243 :
5244 :
5245 : /* ----------
5246 : * AfterTriggerEndXact()
5247 : *
5248 : * The current transaction is finishing.
5249 : *
5250 : * Any unfired triggers are canceled so we simply throw
5251 : * away anything we know.
5252 : *
5253 : * Note: it is possible for this to be called repeatedly in case of
5254 : * error during transaction abort; therefore, do not complain if
5255 : * already closed down.
5256 : * ----------
5257 : */
5258 : void
5259 748776 : AfterTriggerEndXact(bool isCommit)
5260 : {
5261 : /*
5262 : * Forget the pending-events list.
5263 : *
5264 : * Since all the info is in TopTransactionContext or children thereof, we
5265 : * don't really need to do anything to reclaim memory. However, the
5266 : * pending-events list could be large, and so it's useful to discard it as
5267 : * soon as possible --- especially if we are aborting because we ran out
5268 : * of memory for the list!
5269 : */
5270 748776 : if (afterTriggers.event_cxt)
5271 : {
5272 6558 : MemoryContextDelete(afterTriggers.event_cxt);
5273 6558 : afterTriggers.event_cxt = NULL;
5274 6558 : afterTriggers.events.head = NULL;
5275 6558 : afterTriggers.events.tail = NULL;
5276 6558 : afterTriggers.events.tailfree = NULL;
5277 : }
5278 :
5279 : /*
5280 : * Forget any subtransaction state as well. Since this can't be very
5281 : * large, we let the eventual reset of TopTransactionContext free the
5282 : * memory instead of doing it here.
5283 : */
5284 748776 : afterTriggers.trans_stack = NULL;
5285 748776 : afterTriggers.maxtransdepth = 0;
5286 :
5287 :
5288 : /*
5289 : * Forget the query stack and constraint-related state information. As
5290 : * with the subtransaction state information, we don't bother freeing the
5291 : * memory here.
5292 : */
5293 748776 : afterTriggers.query_stack = NULL;
5294 748776 : afterTriggers.maxquerydepth = 0;
5295 748776 : afterTriggers.state = NULL;
5296 :
5297 : /* No more afterTriggers manipulation until next transaction starts. */
5298 748776 : afterTriggers.query_depth = -1;
5299 748776 : }
5300 :
5301 : /*
5302 : * AfterTriggerBeginSubXact()
5303 : *
5304 : * Start a subtransaction.
5305 : */
5306 : void
5307 19988 : AfterTriggerBeginSubXact(void)
5308 : {
5309 19988 : int my_level = GetCurrentTransactionNestLevel();
5310 :
5311 : /*
5312 : * Allocate more space in the trans_stack if needed. (Note: because the
5313 : * minimum nest level of a subtransaction is 2, we waste the first couple
5314 : * entries of the array; not worth the notational effort to avoid it.)
5315 : */
5316 22772 : while (my_level >= afterTriggers.maxtransdepth)
5317 : {
5318 2784 : if (afterTriggers.maxtransdepth == 0)
5319 : {
5320 : /* Arbitrarily initialize for max of 8 subtransaction levels */
5321 2700 : afterTriggers.trans_stack = (AfterTriggersTransData *)
5322 2700 : MemoryContextAlloc(TopTransactionContext,
5323 : 8 * sizeof(AfterTriggersTransData));
5324 2700 : afterTriggers.maxtransdepth = 8;
5325 : }
5326 : else
5327 : {
5328 : /* repalloc will keep the stack in the same context */
5329 84 : int new_alloc = afterTriggers.maxtransdepth * 2;
5330 :
5331 84 : afterTriggers.trans_stack = (AfterTriggersTransData *)
5332 84 : repalloc(afterTriggers.trans_stack,
5333 : new_alloc * sizeof(AfterTriggersTransData));
5334 84 : afterTriggers.maxtransdepth = new_alloc;
5335 : }
5336 : }
5337 :
5338 : /*
5339 : * Push the current information into the stack. The SET CONSTRAINTS state
5340 : * is not saved until/unless changed. Likewise, we don't make a
5341 : * per-subtransaction event context until needed.
5342 : */
5343 19988 : afterTriggers.trans_stack[my_level].state = NULL;
5344 19988 : afterTriggers.trans_stack[my_level].events = afterTriggers.events;
5345 19988 : afterTriggers.trans_stack[my_level].query_depth = afterTriggers.query_depth;
5346 19988 : afterTriggers.trans_stack[my_level].firing_counter = afterTriggers.firing_counter;
5347 19988 : }
5348 :
/*
 * AfterTriggerEndSubXact()
 *
 *	The current subtransaction is ending.
 *
 * On commit, we merely discard the state saved at subxact start; the parent
 * inherits whatever events/state the subxact accumulated.  On abort, we roll
 * the module's global state back to what was saved by the matching
 * AfterTriggerBeginSubXact call.  The order of the restore steps below is
 * significant and must not be rearranged.
 */
void
AfterTriggerEndSubXact(bool isCommit)
{
	int			my_level = GetCurrentTransactionNestLevel();
	SetConstraintState state;
	AfterTriggerEvent event;
	AfterTriggerEventChunk *chunk;
	CommandId	subxact_firing_id;

	/*
	 * Pop the prior state if needed.
	 */
	if (isCommit)
	{
		Assert(my_level < afterTriggers.maxtransdepth);
		/* If we saved a prior state, we don't need it anymore */
		state = afterTriggers.trans_stack[my_level].state;
		if (state != NULL)
			pfree(state);
		/* this avoids double pfree if error later: */
		afterTriggers.trans_stack[my_level].state = NULL;
		Assert(afterTriggers.query_depth ==
			   afterTriggers.trans_stack[my_level].query_depth);
	}
	else
	{
		/*
		 * Aborting.  It is possible subxact start failed before calling
		 * AfterTriggerBeginSubXact, in which case we mustn't risk touching
		 * trans_stack levels that aren't there.
		 */
		if (my_level >= afterTriggers.maxtransdepth)
			return;

		/*
		 * Release query-level storage for queries being aborted, and restore
		 * query_depth to its pre-subxact value.  This assumes that a
		 * subtransaction will not add events to query levels started in a
		 * earlier transaction state.
		 *
		 * (Depths >= maxquerydepth were never initialized, so there is
		 * nothing to free for them; hence the range check before calling
		 * AfterTriggerFreeQuery.)
		 */
		while (afterTriggers.query_depth > afterTriggers.trans_stack[my_level].query_depth)
		{
			if (afterTriggers.query_depth < afterTriggers.maxquerydepth)
				AfterTriggerFreeQuery(&afterTriggers.query_stack[afterTriggers.query_depth]);
			afterTriggers.query_depth--;
		}
		Assert(afterTriggers.query_depth ==
			   afterTriggers.trans_stack[my_level].query_depth);

		/*
		 * Restore the global deferred-event list to its former length,
		 * discarding any events queued by the subxact.
		 */
		afterTriggerRestoreEventList(&afterTriggers.events,
									 &afterTriggers.trans_stack[my_level].events);

		/*
		 * Restore the trigger state.  If the saved state is NULL, then this
		 * subxact didn't save it, so it doesn't need restoring.
		 */
		state = afterTriggers.trans_stack[my_level].state;
		if (state != NULL)
		{
			pfree(afterTriggers.state);
			afterTriggers.state = state;
		}
		/* this avoids double pfree if error later: */
		afterTriggers.trans_stack[my_level].state = NULL;

		/*
		 * Scan for any remaining deferred events that were marked DONE or IN
		 * PROGRESS by this subxact or a child, and un-mark them.  We can
		 * recognize such events because they have a firing ID greater than or
		 * equal to the firing_counter value we saved at subtransaction start.
		 * (This essentially assumes that the current subxact includes all
		 * subxacts started after it.)
		 */
		subxact_firing_id = afterTriggers.trans_stack[my_level].firing_counter;
		for_each_event_chunk(event, chunk, afterTriggers.events)
		{
			AfterTriggerShared evtshared = GetTriggerSharedData(event);

			if (event->ate_flags &
				(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS))
			{
				if (evtshared->ats_firing_id >= subxact_firing_id)
					event->ate_flags &=
						~(AFTER_TRIGGER_DONE | AFTER_TRIGGER_IN_PROGRESS);
			}
		}
	}
}
5446 :
5447 : /*
5448 : * Get the transition table for the given event and depending on whether we are
5449 : * processing the old or the new tuple.
5450 : */
5451 : static Tuplestorestate *
5452 66084 : GetAfterTriggersTransitionTable(int event,
5453 : TupleTableSlot *oldslot,
5454 : TupleTableSlot *newslot,
5455 : TransitionCaptureState *transition_capture)
5456 : {
5457 66084 : Tuplestorestate *tuplestore = NULL;
5458 66084 : bool delete_old_table = transition_capture->tcs_delete_old_table;
5459 66084 : bool update_old_table = transition_capture->tcs_update_old_table;
5460 66084 : bool update_new_table = transition_capture->tcs_update_new_table;
5461 66084 : bool insert_new_table = transition_capture->tcs_insert_new_table;
5462 :
5463 : /*
5464 : * For INSERT events NEW should be non-NULL, for DELETE events OLD should
5465 : * be non-NULL, whereas for UPDATE events normally both OLD and NEW are
5466 : * non-NULL. But for UPDATE events fired for capturing transition tuples
5467 : * during UPDATE partition-key row movement, OLD is NULL when the event is
5468 : * for a row being inserted, whereas NEW is NULL when the event is for a
5469 : * row being deleted.
5470 : */
5471 : Assert(!(event == TRIGGER_EVENT_DELETE && delete_old_table &&
5472 : TupIsNull(oldslot)));
5473 : Assert(!(event == TRIGGER_EVENT_INSERT && insert_new_table &&
5474 : TupIsNull(newslot)));
5475 :
5476 66084 : if (!TupIsNull(oldslot))
5477 : {
5478 : Assert(TupIsNull(newslot));
5479 5394 : if (event == TRIGGER_EVENT_DELETE && delete_old_table)
5480 5040 : tuplestore = transition_capture->tcs_private->old_del_tuplestore;
5481 354 : else if (event == TRIGGER_EVENT_UPDATE && update_old_table)
5482 330 : tuplestore = transition_capture->tcs_private->old_upd_tuplestore;
5483 : }
5484 60690 : else if (!TupIsNull(newslot))
5485 : {
5486 : Assert(TupIsNull(oldslot));
5487 60690 : if (event == TRIGGER_EVENT_INSERT && insert_new_table)
5488 60336 : tuplestore = transition_capture->tcs_private->new_ins_tuplestore;
5489 354 : else if (event == TRIGGER_EVENT_UPDATE && update_new_table)
5490 348 : tuplestore = transition_capture->tcs_private->new_upd_tuplestore;
5491 : }
5492 :
5493 66084 : return tuplestore;
5494 : }
5495 :
5496 : /*
5497 : * Add the given heap tuple to the given tuplestore, applying the conversion
5498 : * map if necessary.
5499 : *
5500 : * If original_insert_tuple is given, we can add that tuple without conversion.
5501 : */
5502 : static void
5503 66084 : TransitionTableAddTuple(EState *estate,
5504 : TransitionCaptureState *transition_capture,
5505 : ResultRelInfo *relinfo,
5506 : TupleTableSlot *slot,
5507 : TupleTableSlot *original_insert_tuple,
5508 : Tuplestorestate *tuplestore)
5509 : {
5510 : TupleConversionMap *map;
5511 :
5512 : /*
5513 : * Nothing needs to be done if we don't have a tuplestore.
5514 : */
5515 66084 : if (tuplestore == NULL)
5516 30 : return;
5517 :
5518 66054 : if (original_insert_tuple)
5519 126 : tuplestore_puttupleslot(tuplestore, original_insert_tuple);
5520 65928 : else if ((map = ExecGetChildToRootMap(relinfo)) != NULL)
5521 : {
5522 294 : AfterTriggersTableData *table = transition_capture->tcs_private;
5523 : TupleTableSlot *storeslot;
5524 :
5525 294 : storeslot = GetAfterTriggersStoreSlot(table, map->outdesc);
5526 294 : execute_attr_map_slot(map->attrMap, slot, storeslot);
5527 294 : tuplestore_puttupleslot(tuplestore, storeslot);
5528 : }
5529 : else
5530 65634 : tuplestore_puttupleslot(tuplestore, slot);
5531 : }
5532 :
5533 : /* ----------
5534 : * AfterTriggerEnlargeQueryState()
5535 : *
5536 : * Prepare the necessary state so that we can record AFTER trigger events
5537 : * queued by a query. It is allowed to have nested queries within a
5538 : * (sub)transaction, so we need to have separate state for each query
5539 : * nesting level.
5540 : * ----------
5541 : */
5542 : static void
5543 6894 : AfterTriggerEnlargeQueryState(void)
5544 : {
5545 6894 : int init_depth = afterTriggers.maxquerydepth;
5546 :
5547 : Assert(afterTriggers.query_depth >= afterTriggers.maxquerydepth);
5548 :
5549 6894 : if (afterTriggers.maxquerydepth == 0)
5550 : {
5551 6894 : int new_alloc = Max(afterTriggers.query_depth + 1, 8);
5552 :
5553 6894 : afterTriggers.query_stack = (AfterTriggersQueryData *)
5554 6894 : MemoryContextAlloc(TopTransactionContext,
5555 : new_alloc * sizeof(AfterTriggersQueryData));
5556 6894 : afterTriggers.maxquerydepth = new_alloc;
5557 : }
5558 : else
5559 : {
5560 : /* repalloc will keep the stack in the same context */
5561 0 : int old_alloc = afterTriggers.maxquerydepth;
5562 0 : int new_alloc = Max(afterTriggers.query_depth + 1,
5563 : old_alloc * 2);
5564 :
5565 0 : afterTriggers.query_stack = (AfterTriggersQueryData *)
5566 0 : repalloc(afterTriggers.query_stack,
5567 : new_alloc * sizeof(AfterTriggersQueryData));
5568 0 : afterTriggers.maxquerydepth = new_alloc;
5569 : }
5570 :
5571 : /* Initialize new array entries to empty */
5572 62046 : while (init_depth < afterTriggers.maxquerydepth)
5573 : {
5574 55152 : AfterTriggersQueryData *qs = &afterTriggers.query_stack[init_depth];
5575 :
5576 55152 : qs->events.head = NULL;
5577 55152 : qs->events.tail = NULL;
5578 55152 : qs->events.tailfree = NULL;
5579 55152 : qs->fdw_tuplestore = NULL;
5580 55152 : qs->tables = NIL;
5581 :
5582 55152 : ++init_depth;
5583 : }
5584 6894 : }
5585 :
5586 : /*
5587 : * Create an empty SetConstraintState with room for numalloc trigstates
5588 : */
5589 : static SetConstraintState
5590 96 : SetConstraintStateCreate(int numalloc)
5591 : {
5592 : SetConstraintState state;
5593 :
5594 : /* Behave sanely with numalloc == 0 */
5595 96 : if (numalloc <= 0)
5596 10 : numalloc = 1;
5597 :
5598 : /*
5599 : * We assume that zeroing will correctly initialize the state values.
5600 : */
5601 : state = (SetConstraintState)
5602 96 : MemoryContextAllocZero(TopTransactionContext,
5603 : offsetof(SetConstraintStateData, trigstates) +
5604 96 : numalloc * sizeof(SetConstraintTriggerData));
5605 :
5606 96 : state->numalloc = numalloc;
5607 :
5608 96 : return state;
5609 : }
5610 :
5611 : /*
5612 : * Copy a SetConstraintState
5613 : */
5614 : static SetConstraintState
5615 10 : SetConstraintStateCopy(SetConstraintState origstate)
5616 : {
5617 : SetConstraintState state;
5618 :
5619 10 : state = SetConstraintStateCreate(origstate->numstates);
5620 :
5621 10 : state->all_isset = origstate->all_isset;
5622 10 : state->all_isdeferred = origstate->all_isdeferred;
5623 10 : state->numstates = origstate->numstates;
5624 10 : memcpy(state->trigstates, origstate->trigstates,
5625 10 : origstate->numstates * sizeof(SetConstraintTriggerData));
5626 :
5627 10 : return state;
5628 : }
5629 :
5630 : /*
5631 : * Add a per-trigger item to a SetConstraintState. Returns possibly-changed
5632 : * pointer to the state object (it will change if we have to repalloc).
5633 : */
5634 : static SetConstraintState
5635 342 : SetConstraintStateAddItem(SetConstraintState state,
5636 : Oid tgoid, bool tgisdeferred)
5637 : {
5638 342 : if (state->numstates >= state->numalloc)
5639 : {
5640 30 : int newalloc = state->numalloc * 2;
5641 :
5642 30 : newalloc = Max(newalloc, 8); /* in case original has size 0 */
5643 : state = (SetConstraintState)
5644 30 : repalloc(state,
5645 : offsetof(SetConstraintStateData, trigstates) +
5646 30 : newalloc * sizeof(SetConstraintTriggerData));
5647 30 : state->numalloc = newalloc;
5648 : Assert(state->numstates < state->numalloc);
5649 : }
5650 :
5651 342 : state->trigstates[state->numstates].sct_tgoid = tgoid;
5652 342 : state->trigstates[state->numstates].sct_tgisdeferred = tgisdeferred;
5653 342 : state->numstates++;
5654 :
5655 342 : return state;
5656 : }
5657 :
/* ----------
 * AfterTriggerSetState()
 *
 *	Execute the SET CONSTRAINTS ... utility command.
 *
 * Updates the per-transaction SET CONSTRAINTS state (either the ALL flags
 * or per-trigger entries for the named constraints), then, when switching
 * to IMMEDIATE, fires any already-queued deferred events retroactively.
 * ----------
 */
void
AfterTriggerSetState(ConstraintsSetStmt *stmt)
{
	int			my_level = GetCurrentTransactionNestLevel();

	/* If we haven't already done so, initialize our state. */
	if (afterTriggers.state == NULL)
		afterTriggers.state = SetConstraintStateCreate(8);

	/*
	 * If in a subtransaction, and we didn't save the current state already,
	 * save it so it can be restored if the subtransaction aborts.
	 */
	if (my_level > 1 &&
		afterTriggers.trans_stack[my_level].state == NULL)
	{
		afterTriggers.trans_stack[my_level].state =
			SetConstraintStateCopy(afterTriggers.state);
	}

	/*
	 * Handle SET CONSTRAINTS ALL ...
	 */
	if (stmt->constraints == NIL)
	{
		/*
		 * Forget any previous SET CONSTRAINTS commands in this transaction.
		 */
		afterTriggers.state->numstates = 0;

		/*
		 * Set the per-transaction ALL state to known.
		 */
		afterTriggers.state->all_isset = true;
		afterTriggers.state->all_isdeferred = stmt->deferred;
	}
	else
	{
		Relation	conrel;
		Relation	tgrel;
		List	   *conoidlist = NIL;	/* OIDs of matching pg_constraint rows */
		List	   *tgoidlist = NIL;	/* OIDs of their deferrable triggers */
		ListCell   *lc;

		/*
		 * Handle SET CONSTRAINTS constraint-name [, ...]
		 *
		 * First, identify all the named constraints and make a list of their
		 * OIDs.  Since, unlike the SQL spec, we allow multiple constraints of
		 * the same name within a schema, the specifications are not
		 * necessarily unique.  Our strategy is to target all matching
		 * constraints within the first search-path schema that has any
		 * matches, but disregard matches in schemas beyond the first match.
		 * (This is a bit odd but it's the historical behavior.)
		 *
		 * A constraint in a partitioned table may have corresponding
		 * constraints in the partitions.  Grab those too.
		 */
		conrel = table_open(ConstraintRelationId, AccessShareLock);

		foreach(lc, stmt->constraints)
		{
			RangeVar   *constraint = lfirst(lc);
			bool		found;
			List	   *namespacelist;
			ListCell   *nslc;

			/* A catalog qualifier is only legal if it names this database. */
			if (constraint->catalogname)
			{
				if (strcmp(constraint->catalogname, get_database_name(MyDatabaseId)) != 0)
					ereport(ERROR,
							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
							 errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
									constraint->catalogname, constraint->schemaname,
									constraint->relname)));
			}

			/*
			 * If we're given the schema name with the constraint, look only
			 * in that schema.  If given a bare constraint name, use the
			 * search path to find the first matching constraint.
			 */
			if (constraint->schemaname)
			{
				Oid			namespaceId = LookupExplicitNamespace(constraint->schemaname,
																  false);

				namespacelist = list_make1_oid(namespaceId);
			}
			else
			{
				namespacelist = fetch_search_path(true);
			}

			found = false;
			foreach(nslc, namespacelist)
			{
				Oid			namespaceId = lfirst_oid(nslc);
				SysScanDesc conscan;
				ScanKeyData skey[2];
				HeapTuple	tup;

				/*
				 * Scan pg_constraint by (conname, connamespace); note the
				 * constraint name is carried in the RangeVar's relname field.
				 */
				ScanKeyInit(&skey[0],
							Anum_pg_constraint_conname,
							BTEqualStrategyNumber, F_NAMEEQ,
							CStringGetDatum(constraint->relname));
				ScanKeyInit(&skey[1],
							Anum_pg_constraint_connamespace,
							BTEqualStrategyNumber, F_OIDEQ,
							ObjectIdGetDatum(namespaceId));

				conscan = systable_beginscan(conrel, ConstraintNameNspIndexId,
											 true, NULL, 2, skey);

				while (HeapTupleIsValid(tup = systable_getnext(conscan)))
				{
					Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tup);

					if (con->condeferrable)
						conoidlist = lappend_oid(conoidlist, con->oid);
					else if (stmt->deferred)
						ereport(ERROR,
								(errcode(ERRCODE_WRONG_OBJECT_TYPE),
								 errmsg("constraint \"%s\" is not deferrable",
										constraint->relname)));
					found = true;
				}

				systable_endscan(conscan);

				/*
				 * Once we've found a matching constraint we do not search
				 * later parts of the search path.
				 */
				if (found)
					break;
			}

			list_free(namespacelist);

			/*
			 * Not found ?
			 */
			if (!found)
				ereport(ERROR,
						(errcode(ERRCODE_UNDEFINED_OBJECT),
						 errmsg("constraint \"%s\" does not exist",
								constraint->relname)));
		}

		/*
		 * Scan for any possible descendants of the constraints.  We append
		 * whatever we find to the same list that we're scanning; this has the
		 * effect that we create new scans for those, too, so if there are
		 * further descendents, we'll also catch them.
		 *
		 * (Appending to a List while foreach'ing over it is safe and visits
		 * the appended cells, which is exactly what we rely on here.)
		 */
		foreach(lc, conoidlist)
		{
			Oid			parent = lfirst_oid(lc);
			ScanKeyData key;
			SysScanDesc scan;
			HeapTuple	tuple;

			ScanKeyInit(&key,
						Anum_pg_constraint_conparentid,
						BTEqualStrategyNumber, F_OIDEQ,
						ObjectIdGetDatum(parent));

			scan = systable_beginscan(conrel, ConstraintParentIndexId, true, NULL, 1, &key);

			while (HeapTupleIsValid(tuple = systable_getnext(scan)))
			{
				Form_pg_constraint con = (Form_pg_constraint) GETSTRUCT(tuple);

				conoidlist = lappend_oid(conoidlist, con->oid);
			}

			systable_endscan(scan);
		}

		table_close(conrel, AccessShareLock);

		/*
		 * Now, locate the trigger(s) implementing each of these constraints,
		 * and make a list of their OIDs.
		 */
		tgrel = table_open(TriggerRelationId, AccessShareLock);

		foreach(lc, conoidlist)
		{
			Oid			conoid = lfirst_oid(lc);
			ScanKeyData skey;
			SysScanDesc tgscan;
			HeapTuple	htup;

			ScanKeyInit(&skey,
						Anum_pg_trigger_tgconstraint,
						BTEqualStrategyNumber, F_OIDEQ,
						ObjectIdGetDatum(conoid));

			tgscan = systable_beginscan(tgrel, TriggerConstraintIndexId, true,
										NULL, 1, &skey);

			while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
			{
				Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);

				/*
				 * Silently skip triggers that are marked as non-deferrable in
				 * pg_trigger.  This is not an error condition, since a
				 * deferrable RI constraint may have some non-deferrable
				 * actions.
				 */
				if (pg_trigger->tgdeferrable)
					tgoidlist = lappend_oid(tgoidlist, pg_trigger->oid);
			}

			systable_endscan(tgscan);
		}

		table_close(tgrel, AccessShareLock);

		/*
		 * Now we can set the trigger states of individual triggers for this
		 * xact.  Update an existing entry if there is one, else append.
		 */
		foreach(lc, tgoidlist)
		{
			Oid			tgoid = lfirst_oid(lc);
			SetConstraintState state = afterTriggers.state;
			bool		found = false;
			int			i;

			for (i = 0; i < state->numstates; i++)
			{
				if (state->trigstates[i].sct_tgoid == tgoid)
				{
					state->trigstates[i].sct_tgisdeferred = stmt->deferred;
					found = true;
					break;
				}
			}
			if (!found)
			{
				/* may repalloc, so store the returned pointer back */
				afterTriggers.state =
					SetConstraintStateAddItem(state, tgoid, stmt->deferred);
			}
		}
	}

	/*
	 * SQL99 requires that when a constraint is set to IMMEDIATE, any deferred
	 * checks against that constraint must be made when the SET CONSTRAINTS
	 * command is executed -- i.e. the effects of the SET CONSTRAINTS command
	 * apply retroactively.  We've updated the constraints state, so scan the
	 * list of previously deferred events to fire any that have now become
	 * immediate.
	 *
	 * Obviously, if this was SET ... DEFERRED then it can't have converted
	 * any unfired events to immediate, so we need do nothing in that case.
	 */
	if (!stmt->deferred)
	{
		AfterTriggerEventList *events = &afterTriggers.events;
		bool		snapshot_set = false;

		while (afterTriggerMarkEvents(events, NULL, true))
		{
			CommandId	firing_id = afterTriggers.firing_counter++;

			/*
			 * Make sure a snapshot has been established in case trigger
			 * functions need one.  Note that we avoid setting a snapshot if
			 * we don't find at least one trigger that has to be fired now.
			 * This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION
			 * ISOLATION LEVEL SERIALIZABLE; ... works properly.  (If we are
			 * at the start of a transaction it's not possible for any trigger
			 * events to be queued yet.)
			 */
			if (!snapshot_set)
			{
				PushActiveSnapshot(GetTransactionSnapshot());
				snapshot_set = true;
			}

			/*
			 * We can delete fired events if we are at top transaction level,
			 * but we'd better not if inside a subtransaction, since the
			 * subtransaction could later get rolled back.
			 */
			if (afterTriggerInvokeEvents(events, firing_id, NULL,
										 !IsSubTransaction()))
				break;			/* all fired */
		}

		if (snapshot_set)
			PopActiveSnapshot();
	}
}
5963 :
5964 : /* ----------
5965 : * AfterTriggerPendingOnRel()
5966 : * Test to see if there are any pending after-trigger events for rel.
5967 : *
5968 : * This is used by TRUNCATE, CLUSTER, ALTER TABLE, etc to detect whether
5969 : * it is unsafe to perform major surgery on a relation. Note that only
5970 : * local pending events are examined. We assume that having exclusive lock
5971 : * on a rel guarantees there are no unserviced events in other backends ---
5972 : * but having a lock does not prevent there being such events in our own.
5973 : *
5974 : * In some scenarios it'd be reasonable to remove pending events (more
5975 : * specifically, mark them DONE by the current subxact) but without a lot
5976 : * of knowledge of the trigger semantics we can't do this in general.
5977 : * ----------
5978 : */
5979 : bool
5980 126262 : AfterTriggerPendingOnRel(Oid relid)
5981 : {
5982 : AfterTriggerEvent event;
5983 : AfterTriggerEventChunk *chunk;
5984 : int depth;
5985 :
5986 : /* Scan queued events */
5987 126298 : for_each_event_chunk(event, chunk, afterTriggers.events)
5988 : {
5989 36 : AfterTriggerShared evtshared = GetTriggerSharedData(event);
5990 :
5991 : /*
5992 : * We can ignore completed events. (Even if a DONE flag is rolled
5993 : * back by subxact abort, it's OK because the effects of the TRUNCATE
5994 : * or whatever must get rolled back too.)
5995 : */
5996 36 : if (event->ate_flags & AFTER_TRIGGER_DONE)
5997 0 : continue;
5998 :
5999 36 : if (evtshared->ats_relid == relid)
6000 18 : return true;
6001 : }
6002 :
6003 : /*
6004 : * Also scan events queued by incomplete queries. This could only matter
6005 : * if TRUNCATE/etc is executed by a function or trigger within an updating
6006 : * query on the same relation, which is pretty perverse, but let's check.
6007 : */
6008 126244 : for (depth = 0; depth <= afterTriggers.query_depth && depth < afterTriggers.maxquerydepth; depth++)
6009 : {
6010 0 : for_each_event_chunk(event, chunk, afterTriggers.query_stack[depth].events)
6011 : {
6012 0 : AfterTriggerShared evtshared = GetTriggerSharedData(event);
6013 :
6014 0 : if (event->ate_flags & AFTER_TRIGGER_DONE)
6015 0 : continue;
6016 :
6017 0 : if (evtshared->ats_relid == relid)
6018 0 : return true;
6019 : }
6020 : }
6021 :
6022 126244 : return false;
6023 : }
6024 :
6025 : /* ----------
6026 : * AfterTriggerSaveEvent()
6027 : *
6028 : * Called by ExecA[RS]...Triggers() to queue up the triggers that should
6029 : * be fired for an event.
6030 : *
6031 : * NOTE: this is called whenever there are any triggers associated with
6032 : * the event (even if they are disabled). This function decides which
6033 : * triggers actually need to be queued. It is also called after each row,
6034 : * even if there are no triggers for that event, if there are any AFTER
6035 : * STATEMENT triggers for the statement which use transition tables, so that
6036 : * the transition tuplestores can be built. Furthermore, if the transition
6037 : * capture is happening for UPDATEd rows being moved to another partition due
6038 : * to the partition-key being changed, then this function is called once when
6039 : * the row is deleted (to capture OLD row), and once when the row is inserted
6040 : * into another partition (to capture NEW row). This is done separately because
6041 : * DELETE and INSERT happen on different tables.
6042 : *
6043 : * Transition tuplestores are built now, rather than when events are pulled
6044 : * off of the queue because AFTER ROW triggers are allowed to select from the
6045 : * transition tables for the statement.
6046 : *
6047 : * This contains special support to queue the update events for the case where
6048 : * a partitioned table undergoing a cross-partition update may have foreign
6049 : * keys pointing into it. Normally, a partitioned table's row triggers are
6050 : * not fired because the leaf partition(s) which are modified as a result of
6051 : * the operation on the partitioned table contain the same triggers which are
6052 : * fired instead. But that general scheme can cause problematic behavior with
6053 : * foreign key triggers during cross-partition updates, which are implemented
6054 : * as DELETE on the source partition followed by INSERT into the destination
6055 : * partition. Specifically, firing DELETE triggers would lead to the wrong
6056 : * foreign key action to be enforced considering that the original command is
6057 : * UPDATE; in this case, this function is called with relinfo as the
6058 : * partitioned table, and src_partinfo and dst_partinfo referring to the
6059 : * source and target leaf partitions, respectively.
6060 : *
6061 : * is_crosspart_update is true either when a DELETE event is fired on the
6062 : * source partition (which is to be ignored) or an UPDATE event is fired on
6063 : * the root partitioned table.
6064 : * ----------
6065 : */
static void
AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo,
					  ResultRelInfo *src_partinfo,
					  ResultRelInfo *dst_partinfo,
					  int event, bool row_trigger,
					  TupleTableSlot *oldslot, TupleTableSlot *newslot,
					  List *recheckIndexes, Bitmapset *modifiedCols,
					  TransitionCaptureState *transition_capture,
					  bool is_crosspart_update)
{
	Relation	rel = relinfo->ri_RelationDesc;
	TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
	AfterTriggerEventData new_event;
	AfterTriggerSharedData new_shared;
	char		relkind = rel->rd_rel->relkind;
	int			tgtype_event;	/* TRIGGER_TYPE_* event bit to match below */
	int			tgtype_level;	/* TRIGGER_TYPE_ROW or TRIGGER_TYPE_STATEMENT */
	int			i;
	Tuplestorestate *fdw_tuplestore = NULL; /* lazily fetched; foreign-table
											 * row triggers only */

	/*
	 * Check state.  We use a normal test not Assert because it is possible to
	 * reach here in the wrong state given misconfigured RI triggers, in
	 * particular deferring a cascade action trigger.
	 */
	if (afterTriggers.query_depth < 0)
		elog(ERROR, "AfterTriggerSaveEvent() called outside of query");

	/* Be sure we have enough space to record events at this query depth. */
	if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
		AfterTriggerEnlargeQueryState();

	/*
	 * If the directly named relation has any triggers with transition tables,
	 * then we need to capture transition tuples.
	 */
	if (row_trigger && transition_capture != NULL)
	{
		TupleTableSlot *original_insert_tuple = transition_capture->tcs_original_insert_tuple;

		/*
		 * Capture the old tuple in the appropriate transition table based on
		 * the event.
		 */
		if (!TupIsNull(oldslot))
		{
			Tuplestorestate *old_tuplestore;

			old_tuplestore = GetAfterTriggersTransitionTable(event,
															 oldslot,
															 NULL,
															 transition_capture);
			TransitionTableAddTuple(estate, transition_capture, relinfo,
									oldslot, NULL, old_tuplestore);
		}

		/*
		 * Capture the new tuple in the appropriate transition table based on
		 * the event.
		 */
		if (!TupIsNull(newslot))
		{
			Tuplestorestate *new_tuplestore;

			new_tuplestore = GetAfterTriggersTransitionTable(event,
															 NULL,
															 newslot,
															 transition_capture);
			TransitionTableAddTuple(estate, transition_capture, relinfo,
									newslot, original_insert_tuple, new_tuplestore);
		}

		/*
		 * If transition tables are the only reason we're here, return. As
		 * mentioned above, we can also be here during update tuple routing in
		 * presence of transition tables, in which case this function is
		 * called separately for OLD and NEW, so we expect exactly one of them
		 * to be NULL.
		 */
		if (trigdesc == NULL ||
			(event == TRIGGER_EVENT_DELETE && !trigdesc->trig_delete_after_row) ||
			(event == TRIGGER_EVENT_INSERT && !trigdesc->trig_insert_after_row) ||
			(event == TRIGGER_EVENT_UPDATE && !trigdesc->trig_update_after_row) ||
			(event == TRIGGER_EVENT_UPDATE && (TupIsNull(oldslot) ^ TupIsNull(newslot))))
			return;
	}

	/*
	 * We normally don't see partitioned tables here for row level triggers
	 * except in the special case of a cross-partition update.  In that case,
	 * nodeModifyTable.c:ExecCrossPartitionUpdateForeignKey() calls here to
	 * queue an update event on the root target partitioned table, also
	 * passing the source and destination partitions and their tuples.
	 */
	Assert(!row_trigger ||
		   rel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE ||
		   (is_crosspart_update &&
			TRIGGER_FIRED_BY_UPDATE(event) &&
			src_partinfo != NULL && dst_partinfo != NULL));

	/*
	 * Validate the event code and collect the associated tuple CTIDs.
	 *
	 * The event code will be used both as a bitmask and an array offset, so
	 * validation is important to make sure we don't walk off the edge of our
	 * arrays.
	 *
	 * Also, if we're considering statement-level triggers, check whether we
	 * already queued a set of them for this event, and cancel the prior set
	 * if so.  This preserves the behavior that statement-level triggers fire
	 * just once per statement and fire after row-level triggers.
	 */
	switch (event)
	{
		case TRIGGER_EVENT_INSERT:
			tgtype_event = TRIGGER_TYPE_INSERT;
			if (row_trigger)
			{
				Assert(oldslot == NULL);
				Assert(newslot != NULL);
				ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid1));
				ItemPointerSetInvalid(&(new_event.ate_ctid2));
			}
			else
			{
				Assert(oldslot == NULL);
				Assert(newslot == NULL);
				ItemPointerSetInvalid(&(new_event.ate_ctid1));
				ItemPointerSetInvalid(&(new_event.ate_ctid2));
				cancel_prior_stmt_triggers(RelationGetRelid(rel),
										   CMD_INSERT, event);
			}
			break;
		case TRIGGER_EVENT_DELETE:
			tgtype_event = TRIGGER_TYPE_DELETE;
			if (row_trigger)
			{
				Assert(oldslot != NULL);
				Assert(newslot == NULL);
				ItemPointerCopy(&(oldslot->tts_tid), &(new_event.ate_ctid1));
				ItemPointerSetInvalid(&(new_event.ate_ctid2));
			}
			else
			{
				Assert(oldslot == NULL);
				Assert(newslot == NULL);
				ItemPointerSetInvalid(&(new_event.ate_ctid1));
				ItemPointerSetInvalid(&(new_event.ate_ctid2));
				cancel_prior_stmt_triggers(RelationGetRelid(rel),
										   CMD_DELETE, event);
			}
			break;
		case TRIGGER_EVENT_UPDATE:
			tgtype_event = TRIGGER_TYPE_UPDATE;
			if (row_trigger)
			{
				Assert(oldslot != NULL);
				Assert(newslot != NULL);
				ItemPointerCopy(&(oldslot->tts_tid), &(new_event.ate_ctid1));
				ItemPointerCopy(&(newslot->tts_tid), &(new_event.ate_ctid2));

				/*
				 * Also remember the OIDs of partitions to fetch these tuples
				 * out of later in AfterTriggerExecute().
				 */
				if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
				{
					Assert(src_partinfo != NULL && dst_partinfo != NULL);
					new_event.ate_src_part =
						RelationGetRelid(src_partinfo->ri_RelationDesc);
					new_event.ate_dst_part =
						RelationGetRelid(dst_partinfo->ri_RelationDesc);
				}
			}
			else
			{
				Assert(oldslot == NULL);
				Assert(newslot == NULL);
				ItemPointerSetInvalid(&(new_event.ate_ctid1));
				ItemPointerSetInvalid(&(new_event.ate_ctid2));
				cancel_prior_stmt_triggers(RelationGetRelid(rel),
										   CMD_UPDATE, event);
			}
			break;
		case TRIGGER_EVENT_TRUNCATE:
			tgtype_event = TRIGGER_TYPE_TRUNCATE;
			Assert(oldslot == NULL);
			Assert(newslot == NULL);
			ItemPointerSetInvalid(&(new_event.ate_ctid1));
			ItemPointerSetInvalid(&(new_event.ate_ctid2));
			break;
		default:
			elog(ERROR, "invalid after-trigger event code: %d", event);
			tgtype_event = 0;	/* keep compiler quiet */
			break;
	}

	/*
	 * Determine flags.  Foreign-table row triggers get their flags set per
	 * trigger in the loop below, since they depend on whether the tuple must
	 * be fetched into, or reused from, the FDW tuplestore.
	 */
	if (!(relkind == RELKIND_FOREIGN_TABLE && row_trigger))
	{
		if (row_trigger && event == TRIGGER_EVENT_UPDATE)
		{
			if (relkind == RELKIND_PARTITIONED_TABLE)
				new_event.ate_flags = AFTER_TRIGGER_CP_UPDATE;
			else
				new_event.ate_flags = AFTER_TRIGGER_2CTID;
		}
		else
			new_event.ate_flags = AFTER_TRIGGER_1CTID;
	}

	/* else, we'll initialize ate_flags for each trigger */

	tgtype_level = (row_trigger ? TRIGGER_TYPE_ROW : TRIGGER_TYPE_STATEMENT);

	/*
	 * Must convert/copy the source and destination partition tuples into the
	 * root partitioned table's format/slot, because the processing in the
	 * loop below expects both oldslot and newslot tuples to be in that form.
	 */
	if (row_trigger && rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
	{
		TupleTableSlot *rootslot;
		TupleConversionMap *map;

		rootslot = ExecGetTriggerOldSlot(estate, relinfo);
		map = ExecGetChildToRootMap(src_partinfo);
		if (map)
			oldslot = execute_attr_map_slot(map->attrMap,
											oldslot,
											rootslot);
		else
			oldslot = ExecCopySlot(rootslot, oldslot);

		rootslot = ExecGetTriggerNewSlot(estate, relinfo);
		map = ExecGetChildToRootMap(dst_partinfo);
		if (map)
			newslot = execute_attr_map_slot(map->attrMap,
											newslot,
											rootslot);
		else
			newslot = ExecCopySlot(rootslot, newslot);
	}

	/*
	 * Scan the relation's triggers and queue an event for each qualifying
	 * AFTER trigger.
	 */
	for (i = 0; i < trigdesc->numtriggers; i++)
	{
		Trigger    *trigger = &trigdesc->triggers[i];

		/* Skip triggers of the wrong level, timing, or event type. */
		if (!TRIGGER_TYPE_MATCHES(trigger->tgtype,
								  tgtype_level,
								  TRIGGER_TYPE_AFTER,
								  tgtype_event))
			continue;
		/* Skip triggers disabled by ALTER TABLE or whose WHEN clause fails. */
		if (!TriggerEnabled(estate, relinfo, trigger, event,
							modifiedCols, oldslot, newslot))
			continue;

		if (relkind == RELKIND_FOREIGN_TABLE && row_trigger)
		{
			if (fdw_tuplestore == NULL)
			{
				fdw_tuplestore = GetCurrentFDWTuplestore();
				new_event.ate_flags = AFTER_TRIGGER_FDW_FETCH;
			}
			else
				/* subsequent event for the same tuple */
				new_event.ate_flags = AFTER_TRIGGER_FDW_REUSE;
		}

		/*
		 * If the trigger is a foreign key enforcement trigger, there are
		 * certain cases where we can skip queueing the event because we can
		 * tell by inspection that the FK constraint will still pass.  There
		 * are also some cases during cross-partition updates of a partitioned
		 * table where queuing the event can be skipped.
		 */
		if (TRIGGER_FIRED_BY_UPDATE(event) || TRIGGER_FIRED_BY_DELETE(event))
		{
			switch (RI_FKey_trigger_type(trigger->tgfoid))
			{
				case RI_TRIGGER_PK:

					/*
					 * For cross-partitioned updates of partitioned PK table,
					 * skip the event fired by the component delete on the
					 * source leaf partition unless the constraint originates
					 * in the partition itself (!tgisclone), because the
					 * update event that will be fired on the root
					 * (partitioned) target table will be used to perform the
					 * necessary foreign key enforcement action.
					 */
					if (is_crosspart_update &&
						TRIGGER_FIRED_BY_DELETE(event) &&
						trigger->tgisclone)
						continue;

					/* Update or delete on trigger's PK table */
					if (!RI_FKey_pk_upd_check_required(trigger, rel,
													   oldslot, newslot))
					{
						/* skip queuing this event */
						continue;
					}
					break;

				case RI_TRIGGER_FK:

					/*
					 * Update on trigger's FK table.  We can skip the update
					 * event fired on a partitioned table during a
					 * cross-partition of that table, because the insert event
					 * that is fired on the destination leaf partition would
					 * suffice to perform the necessary foreign key check.
					 * Moreover, RI_FKey_fk_upd_check_required() expects to be
					 * passed a tuple that contains system attributes, most of
					 * which are not present in the virtual slot belonging to
					 * a partitioned table.
					 */
					if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ||
						!RI_FKey_fk_upd_check_required(trigger, rel,
													   oldslot, newslot))
					{
						/* skip queuing this event */
						continue;
					}
					break;

				case RI_TRIGGER_NONE:

					/*
					 * Not an FK trigger.  No need to queue the update event
					 * fired during a cross-partitioned update of a
					 * partitioned table, because the same row trigger must be
					 * present in the leaf partition(s) that are affected as
					 * part of this update and the events fired on them are
					 * queued instead.
					 */
					if (row_trigger &&
						rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
						continue;
					break;
			}
		}

		/*
		 * If the trigger is a deferred unique constraint check trigger, only
		 * queue it if the unique constraint was potentially violated, which
		 * we know from index insertion time.
		 */
		if (trigger->tgfoid == F_UNIQUE_KEY_RECHECK)
		{
			if (!list_member_oid(recheckIndexes, trigger->tgconstrindid))
				continue;		/* Uniqueness definitely not violated */
		}

		/*
		 * Fill in event structure and add it to the current query's queue.
		 * Note we set ats_table to NULL whenever this trigger doesn't use
		 * transition tables, to improve sharability of the shared event data.
		 */
		new_shared.ats_event =
			(event & TRIGGER_EVENT_OPMASK) |
			(row_trigger ? TRIGGER_EVENT_ROW : 0) |
			(trigger->tgdeferrable ? AFTER_TRIGGER_DEFERRABLE : 0) |
			(trigger->tginitdeferred ? AFTER_TRIGGER_INITDEFERRED : 0);
		new_shared.ats_tgoid = trigger->tgoid;
		new_shared.ats_relid = RelationGetRelid(rel);
		new_shared.ats_firing_id = 0;
		if ((trigger->tgoldtable || trigger->tgnewtable) &&
			transition_capture != NULL)
			new_shared.ats_table = transition_capture->tcs_private;
		else
			new_shared.ats_table = NULL;
		new_shared.ats_modifiedcols = afterTriggerCopyBitmap(modifiedCols);

		afterTriggerAddEvent(&afterTriggers.query_stack[afterTriggers.query_depth].events,
							 &new_event, &new_shared);
	}

	/*
	 * Finally, spool any foreign tuple(s).  The tuplestore squashes them to
	 * minimal tuples, so this loses any system columns.  The executor lost
	 * those columns before us, for an unrelated reason, so this is fine.
	 */
	if (fdw_tuplestore)
	{
		if (oldslot != NULL)
			tuplestore_puttupleslot(fdw_tuplestore, oldslot);
		if (newslot != NULL)
			tuplestore_puttupleslot(fdw_tuplestore, newslot);
	}
}
6458 :
6459 : /*
6460 : * Detect whether we already queued BEFORE STATEMENT triggers for the given
6461 : * relation + operation, and set the flag so the next call will report "true".
6462 : */
6463 : static bool
6464 510 : before_stmt_triggers_fired(Oid relid, CmdType cmdType)
6465 : {
6466 : bool result;
6467 : AfterTriggersTableData *table;
6468 :
6469 : /* Check state, like AfterTriggerSaveEvent. */
6470 510 : if (afterTriggers.query_depth < 0)
6471 0 : elog(ERROR, "before_stmt_triggers_fired() called outside of query");
6472 :
6473 : /* Be sure we have enough space to record events at this query depth. */
6474 510 : if (afterTriggers.query_depth >= afterTriggers.maxquerydepth)
6475 336 : AfterTriggerEnlargeQueryState();
6476 :
6477 : /*
6478 : * We keep this state in the AfterTriggersTableData that also holds
6479 : * transition tables for the relation + operation. In this way, if we are
6480 : * forced to make a new set of transition tables because more tuples get
6481 : * entered after we've already fired triggers, we will allow a new set of
6482 : * statement triggers to get queued.
6483 : */
6484 510 : table = GetAfterTriggersTableData(relid, cmdType);
6485 510 : result = table->before_trig_done;
6486 510 : table->before_trig_done = true;
6487 510 : return result;
6488 : }
6489 :
/*
 * If we previously queued a set of AFTER STATEMENT triggers for the given
 * relation + operation, and they've not been fired yet, cancel them.  The
 * caller will queue a fresh set that's after any row-level triggers that may
 * have been queued by the current sub-statement, preserving (as much as
 * possible) the property that AFTER ROW triggers fire before AFTER STATEMENT
 * triggers, and that the latter only fire once.  This deals with the
 * situation where several FK enforcement triggers sequentially queue triggers
 * for the same table into the same trigger query level.  We can't fully
 * prevent odd behavior though: if there are AFTER ROW triggers taking
 * transition tables, we don't want to change the transition tables once the
 * first such trigger has seen them.  In such a case, any additional events
 * will result in creating new transition tables and allowing new firings of
 * statement triggers.
 *
 * This also saves the current event list location so that a later invocation
 * of this function can cheaply find the triggers we're about to queue and
 * cancel them.
 */
static void
cancel_prior_stmt_triggers(Oid relid, CmdType cmdType, int tgevent)
{
	AfterTriggersTableData *table;
	AfterTriggersQueryData *qs = &afterTriggers.query_stack[afterTriggers.query_depth];

	/*
	 * We keep this state in the AfterTriggersTableData that also holds
	 * transition tables for the relation + operation.  In this way, if we are
	 * forced to make a new set of transition tables because more tuples get
	 * entered after we've already fired triggers, we will allow a new set of
	 * statement triggers to get queued without canceling the old ones.
	 */
	table = GetAfterTriggersTableData(relid, cmdType);

	if (table->after_trig_done)
	{
		/*
		 * We want to start scanning from the tail location that existed just
		 * before we inserted any statement triggers.  But the events list
		 * might've been entirely empty then, in which case scan from the
		 * current head.
		 */
		AfterTriggerEvent event;
		AfterTriggerEventChunk *chunk;

		if (table->after_trig_events.tail)
		{
			chunk = table->after_trig_events.tail;
			event = (AfterTriggerEvent) table->after_trig_events.tailfree;
		}
		else
		{
			chunk = qs->events.head;
			event = NULL;		/* start from the first event of the chunk */
		}

		for_each_chunk_from(chunk)
		{
			if (event == NULL)
				event = (AfterTriggerEvent) CHUNK_DATA_START(chunk);
			for_each_event_from(event, chunk)
			{
				AfterTriggerShared evtshared = GetTriggerSharedData(event);

				/*
				 * Exit loop when we reach events that aren't AS triggers for
				 * the target relation.
				 */
				if (evtshared->ats_relid != relid)
					goto done;
				if ((evtshared->ats_event & TRIGGER_EVENT_OPMASK) != tgevent)
					goto done;
				if (!TRIGGER_FIRED_FOR_STATEMENT(evtshared->ats_event))
					goto done;
				if (!TRIGGER_FIRED_AFTER(evtshared->ats_event))
					goto done;
				/* OK, mark it DONE */
				event->ate_flags &= ~AFTER_TRIGGER_IN_PROGRESS;
				event->ate_flags |= AFTER_TRIGGER_DONE;
			}
			/* signal we must reinitialize event ptr for next chunk */
			event = NULL;
		}
	}
done:

	/* In any case, save current insertion point for next time */
	table->after_trig_done = true;
	table->after_trig_events = qs->events;
}
6580 :
6581 : /*
6582 : * GUC assign_hook for session_replication_role
6583 : */
6584 : void
6585 2766 : assign_session_replication_role(int newval, void *extra)
6586 : {
6587 : /*
6588 : * Must flush the plan cache when changing replication role; but don't
6589 : * flush unnecessarily.
6590 : */
6591 2766 : if (SessionReplicationRole != newval)
6592 800 : ResetPlanCache();
6593 2766 : }
6594 :
6595 : /*
6596 : * SQL function pg_trigger_depth()
6597 : */
6598 : Datum
6599 90 : pg_trigger_depth(PG_FUNCTION_ARGS)
6600 : {
6601 90 : PG_RETURN_INT32(MyTriggerDepth);
6602 : }
|