Line  Hit count  Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * execMain.c
4 : * top level executor interface routines
5 : *
6 : * INTERFACE ROUTINES
7 : * ExecutorStart()
8 : * ExecutorRun()
9 : * ExecutorFinish()
10 : * ExecutorEnd()
11 : *
12 : * These four procedures are the external interface to the executor.
13 : * In each case, the query descriptor is required as an argument.
14 : *
15 : * ExecutorStart must be called at the beginning of execution of any
16 : * query plan and ExecutorEnd must always be called at the end of
17 : * execution of a plan (unless it is aborted due to error).
18 : *
19 : * ExecutorRun accepts direction and count arguments that specify whether
20 : * the plan is to be executed forwards or backwards, and for how many tuples.
21 : * In some cases ExecutorRun may be called multiple times to process all
22 : * the tuples for a plan. It is also acceptable to stop short of executing
23 : * the whole plan (but only if it is a SELECT).
24 : *
25 : * ExecutorFinish must be called after the final ExecutorRun call and
26 : * before ExecutorEnd. This can be omitted only in case of EXPLAIN,
27 : * which should also omit ExecutorRun.
28 : *
29 : * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
30 : * Portions Copyright (c) 1994, Regents of the University of California
31 : *
32 : *
33 : * IDENTIFICATION
34 : * src/backend/executor/execMain.c
35 : *
36 : *-------------------------------------------------------------------------
37 : */
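/*
 * A minimal sketch, not part of this file, of the call sequence that the
 * comment above describes.  It assumes a QueryDesc "queryDesc" already
 * built by CreateQueryDesc() and the query's snapshot pushed as the
 * active snapshot:
 *
 *		ExecutorStart(queryDesc, 0);
 *		ExecutorRun(queryDesc, ForwardScanDirection, 0);
 *		ExecutorFinish(queryDesc);
 *		ExecutorEnd(queryDesc);
 *
 * Passing count = 0 to ExecutorRun runs the plan to completion; output
 * tuples, if any, go to the DestReceiver in queryDesc->dest.  In
 * EXPLAIN-only mode (eflags including EXEC_FLAG_EXPLAIN_ONLY), both
 * ExecutorRun and ExecutorFinish are skipped, per the rules above.
 */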
38 : #include "postgres.h"
39 :
40 : #include "access/sysattr.h"
41 : #include "access/table.h"
42 : #include "access/tableam.h"
43 : #include "access/xact.h"
44 : #include "catalog/namespace.h"
45 : #include "catalog/partition.h"
46 : #include "commands/matview.h"
47 : #include "commands/trigger.h"
48 : #include "executor/executor.h"
49 : #include "executor/execPartition.h"
50 : #include "executor/nodeSubplan.h"
51 : #include "foreign/fdwapi.h"
52 : #include "mb/pg_wchar.h"
53 : #include "miscadmin.h"
54 : #include "nodes/queryjumble.h"
55 : #include "parser/parse_relation.h"
56 : #include "pgstat.h"
57 : #include "rewrite/rewriteHandler.h"
58 : #include "tcop/utility.h"
59 : #include "utils/acl.h"
60 : #include "utils/backend_status.h"
61 : #include "utils/lsyscache.h"
62 : #include "utils/partcache.h"
63 : #include "utils/rls.h"
64 : #include "utils/snapmgr.h"
65 :
66 :
67 : /* Hooks for plugins to get control in ExecutorStart/Run/Finish/End */
68 : ExecutorStart_hook_type ExecutorStart_hook = NULL;
69 : ExecutorRun_hook_type ExecutorRun_hook = NULL;
70 : ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
71 : ExecutorEnd_hook_type ExecutorEnd_hook = NULL;
72 :
73 : /* Hook for plugin to get control in ExecCheckPermissions() */
74 : ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;
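/*
 * Sketch of how a loadable module typically installs one of these hooks
 * (the pattern used by contrib modules such as pg_stat_statements); the
 * names prev_ExecutorStart, my_ExecutorStart, and do_extension_work are
 * hypothetical:
 *
 *		static ExecutorStart_hook_type prev_ExecutorStart = NULL;
 *
 *		static void
 *		my_ExecutorStart(QueryDesc *queryDesc, int eflags)
 *		{
 *			do_extension_work(queryDesc);
 *			if (prev_ExecutorStart)
 *				prev_ExecutorStart(queryDesc, eflags);
 *			else
 *				standard_ExecutorStart(queryDesc, eflags);
 *		}
 *
 *		void
 *		_PG_init(void)
 *		{
 *			prev_ExecutorStart = ExecutorStart_hook;
 *			ExecutorStart_hook = my_ExecutorStart;
 *		}
 *
 * Saving and chaining to the previous hook value lets multiple modules
 * cooperate; falling back to standard_ExecutorStart() preserves the core
 * behavior when no other hook is installed.
 */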
75 :
76 : /* decls for local routines only used within this module */
77 : static void InitPlan(QueryDesc *queryDesc, int eflags);
78 : static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
79 : static void ExecPostprocessPlan(EState *estate);
80 : static void ExecEndPlan(PlanState *planstate, EState *estate);
81 : static void ExecutePlan(QueryDesc *queryDesc,
82 : CmdType operation,
83 : bool sendTuples,
84 : uint64 numberTuples,
85 : ScanDirection direction,
86 : DestReceiver *dest);
87 : static bool ExecCheckPermissionsModified(Oid relOid, Oid userid,
88 : Bitmapset *modifiedCols,
89 : AclMode requiredPerms);
90 : static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
91 : static void EvalPlanQualStart(EPQState *epqstate, Plan *planTree);
92 : static void ReportNotNullViolationError(ResultRelInfo *resultRelInfo,
93 : TupleTableSlot *slot,
94 : EState *estate, int attnum);
95 :
96 : /* end of local decls */
97 :
98 :
99 : /* ----------------------------------------------------------------
100 : * ExecutorStart
101 : *
102 : * This routine must be called at the beginning of any execution of any
103 : * query plan
104 : *
105 : * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
106 : * only because some places use QueryDescs for utility commands). The tupDesc
107 : * field of the QueryDesc is filled in to describe the tuples that will be
108 : * returned, and the internal fields (estate and planstate) are set up.
109 : *
110 : * eflags contains flag bits as described in executor.h.
111 : *
112 : * NB: the CurrentMemoryContext when this is called will become the parent
113 : * of the per-query context used for this Executor invocation.
114 : *
115 : * We provide a function hook variable that lets loadable plugins
116 : * get control when ExecutorStart is called. Such a plugin would
117 : * normally call standard_ExecutorStart().
118 : *
119 : * ----------------------------------------------------------------
120 : */
121 : void
122 573458 : ExecutorStart(QueryDesc *queryDesc, int eflags)
123 : {
124 : /*
125 : * In some cases (e.g. an EXECUTE statement or an execute message with the
126 : * extended query protocol) the query_id won't be reported, so do it now.
127 : *
128 : * Note that it's harmless to report the query_id multiple times, as the
129 : * call will be ignored if the top level query_id has already been
130 : * reported.
131 : */
132 573458 : pgstat_report_query_id(queryDesc->plannedstmt->queryId, false);
133 :
134 573458 : if (ExecutorStart_hook)
135 115312 : (*ExecutorStart_hook) (queryDesc, eflags);
136 : else
137 458146 : standard_ExecutorStart(queryDesc, eflags);
138 571788 : }
139 :
140 : void
141 573458 : standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
142 : {
143 : EState *estate;
144 : MemoryContext oldcontext;
145 :
146 : /* sanity checks: queryDesc must not be started already */
147 : Assert(queryDesc != NULL);
148 : Assert(queryDesc->estate == NULL);
149 :
150 : /* caller must ensure the query's snapshot is active */
151 : Assert(GetActiveSnapshot() == queryDesc->snapshot);
152 :
153 : /*
154 : * If the transaction is read-only, we need to check if any writes are
155 : * planned to non-temporary tables. EXPLAIN is considered read-only.
156 : *
157 : * Don't allow writes in parallel mode. Supporting UPDATE and DELETE
158 : * would require (a) storing the combo CID hash in shared memory, rather
159 : * than synchronizing it just once at the start of parallelism, and (b) an
160 : * alternative to heap_update()'s reliance on xmax for mutual exclusion.
161 : * INSERT may have no such troubles, but we forbid it to simplify the
162 : * checks.
163 : *
164 : * We have lower-level defenses in CommandCounterIncrement and elsewhere
165 : * against performing unsafe operations in parallel mode, but this gives a
166 : * more user-friendly error message.
167 : */
168 573458 : if ((XactReadOnly || IsInParallelMode()) &&
169 62026 : !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
170 62026 : ExecCheckXactReadOnly(queryDesc->plannedstmt);
171 :
172 : /*
173 : * Build EState, switch into per-query memory context for startup.
174 : */
175 573442 : estate = CreateExecutorState();
176 573442 : queryDesc->estate = estate;
177 :
178 573442 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
179 :
180 : /*
181 : * Fill in external parameters, if any, from queryDesc; and allocate
182 : * workspace for internal parameters
183 : */
184 573442 : estate->es_param_list_info = queryDesc->params;
185 :
186 573442 : if (queryDesc->plannedstmt->paramExecTypes != NIL)
187 : {
188 : int nParamExec;
189 :
190 185708 : nParamExec = list_length(queryDesc->plannedstmt->paramExecTypes);
191 185708 : estate->es_param_exec_vals = (ParamExecData *)
192 185708 : palloc0(nParamExec * sizeof(ParamExecData));
193 : }
194 :
195 : /* We now require all callers to provide sourceText */
196 : Assert(queryDesc->sourceText != NULL);
197 573442 : estate->es_sourceText = queryDesc->sourceText;
198 :
199 : /*
200 : * Fill in the query environment, if any, from queryDesc.
201 : */
202 573442 : estate->es_queryEnv = queryDesc->queryEnv;
203 :
204 : /*
205 : * If non-read-only query, set the command ID to mark output tuples with
206 : */
207 573442 : switch (queryDesc->operation)
208 : {
209 457178 : case CMD_SELECT:
210 :
211 : /*
212 : * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
213 : * tuples
214 : */
215 457178 : if (queryDesc->plannedstmt->rowMarks != NIL ||
216 449236 : queryDesc->plannedstmt->hasModifyingCTE)
217 8078 : estate->es_output_cid = GetCurrentCommandId(true);
218 :
219 : /*
220 : * A SELECT without modifying CTEs can't possibly queue triggers,
221 : * so force skip-triggers mode. This is just a marginal efficiency
222 : * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
223 : * all that expensive, but we might as well do it.
224 : */
225 457178 : if (!queryDesc->plannedstmt->hasModifyingCTE)
226 457036 : eflags |= EXEC_FLAG_SKIP_TRIGGERS;
227 457178 : break;
228 :
229 116264 : case CMD_INSERT:
230 : case CMD_DELETE:
231 : case CMD_UPDATE:
232 : case CMD_MERGE:
233 116264 : estate->es_output_cid = GetCurrentCommandId(true);
234 116264 : break;
235 :
236 0 : default:
237 0 : elog(ERROR, "unrecognized operation code: %d",
238 : (int) queryDesc->operation);
239 : break;
240 : }
241 :
242 : /*
243 : * Copy other important information into the EState
244 : */
245 573442 : estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
246 573442 : estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
247 573442 : estate->es_top_eflags = eflags;
248 573442 : estate->es_instrument = queryDesc->instrument_options;
249 573442 : estate->es_jit_flags = queryDesc->plannedstmt->jitFlags;
250 :
251 : /*
252 : * Set up an AFTER-trigger statement context, unless told not to, or
253 : * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
254 : */
255 573442 : if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
256 114774 : AfterTriggerBeginQuery();
257 :
258 : /*
259 : * Initialize the plan state tree
260 : */
261 573442 : InitPlan(queryDesc, eflags);
262 :
263 571788 : MemoryContextSwitchTo(oldcontext);
264 571788 : }
265 :
266 : /* ----------------------------------------------------------------
267 : * ExecutorRun
268 : *
269 : * This is the main routine of the executor module. It accepts
270 : * the query descriptor from the traffic cop and executes the
271 : * query plan.
272 : *
273 : * ExecutorStart must have been called already.
274 : *
275 : * If direction is NoMovementScanDirection then nothing is done
276 : * except to start up/shut down the destination. Otherwise,
277 : * we retrieve up to 'count' tuples in the specified direction.
278 : *
279 : * Note: count = 0 is interpreted as no portal limit, i.e., run to
280 : * completion. Also note that the count limit is only applied to
281 : * retrieved tuples, not for instance to those inserted/updated/deleted
282 : * by a ModifyTable plan node.
283 : *
284 : * There is no return value, but output tuples (if any) are sent to
285 : * the destination receiver specified in the QueryDesc; and the number
286 : * of tuples processed at the top level can be found in
287 : * estate->es_processed. The total number of tuples processed in all
288 : * the ExecutorRun calls can be found in estate->es_total_processed.
289 : *
290 : * We provide a function hook variable that lets loadable plugins
291 : * get control when ExecutorRun is called. Such a plugin would
292 : * normally call standard_ExecutorRun().
293 : *
294 : * ----------------------------------------------------------------
295 : */
296 : void
297 563776 : ExecutorRun(QueryDesc *queryDesc,
298 : ScanDirection direction, uint64 count)
299 : {
300 563776 : if (ExecutorRun_hook)
301 112260 : (*ExecutorRun_hook) (queryDesc, direction, count);
302 : else
303 451516 : standard_ExecutorRun(queryDesc, direction, count);
304 540128 : }
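/*
 * Sketch, assuming an already-started SELECT queryDesc, of how a caller
 * might use the count argument to fetch in batches rather than running
 * the plan to completion (fetch_batch is a hypothetical helper, loosely
 * modeled on what pquery.c does for portals):
 *
 *		static uint64
 *		fetch_batch(QueryDesc *queryDesc, uint64 batch_size)
 *		{
 *			ExecutorRun(queryDesc, ForwardScanDirection, batch_size);
 *			return queryDesc->estate->es_processed;
 *		}
 *
 * Each call resumes where the previous one stopped; es_processed counts
 * the tuples of the latest call, while es_total_processed accumulates
 * across calls.  Stopping short of the whole plan this way is allowed
 * only for SELECT, per the rules in the header comment.
 */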
305 :
306 : void
307 563776 : standard_ExecutorRun(QueryDesc *queryDesc,
308 : ScanDirection direction, uint64 count)
309 : {
310 : EState *estate;
311 : CmdType operation;
312 : DestReceiver *dest;
313 : bool sendTuples;
314 : MemoryContext oldcontext;
315 :
316 : /* sanity checks */
317 : Assert(queryDesc != NULL);
318 :
319 563776 : estate = queryDesc->estate;
320 :
321 : Assert(estate != NULL);
322 : Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
323 :
324 : /* caller must ensure the query's snapshot is active */
325 : Assert(GetActiveSnapshot() == estate->es_snapshot);
326 :
327 : /*
328 : * Switch into per-query memory context
329 : */
330 563776 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
331 :
332 : /* Allow instrumentation of Executor overall runtime */
333 563776 : if (queryDesc->totaltime)
334 76624 : InstrStartNode(queryDesc->totaltime);
335 :
336 : /*
337 : * extract information from the query descriptor.
338 : */
339 563776 : operation = queryDesc->operation;
340 563776 : dest = queryDesc->dest;
341 :
342 : /*
343 : * start up tuple receiver, if we will be emitting tuples
344 : */
345 563776 : estate->es_processed = 0;
346 :
347 678040 : sendTuples = (operation == CMD_SELECT ||
348 114264 : queryDesc->plannedstmt->hasReturning);
349 :
350 563776 : if (sendTuples)
351 453666 : dest->rStartup(dest, operation, queryDesc->tupDesc);
352 :
353 : /*
354 : * Run plan, unless direction is NoMovement.
355 : *
356 : * Note: pquery.c selects NoMovement if a prior call already reached
357 : * end-of-data in the user-specified fetch direction. This is important
358 : * because various parts of the executor can misbehave if called again
359 : * after reporting EOF. For example, heapam.c would actually restart a
360 : * heapscan and return all its data afresh. There is also some doubt
361 : * about whether a parallel plan would operate properly if an additional,
362 : * necessarily non-parallel execution request occurs after completing a
363 : * parallel execution. (That case should work, but it's untested.)
364 : */
365 563738 : if (!ScanDirectionIsNoMovement(direction))
366 562496 : ExecutePlan(queryDesc,
367 : operation,
368 : sendTuples,
369 : count,
370 : direction,
371 : dest);
372 :
373 : /*
374 : * Update es_total_processed to keep track of the number of tuples
375 : * processed across multiple ExecutorRun() calls.
376 : */
377 540128 : estate->es_total_processed += estate->es_processed;
378 :
379 : /*
380 : * shutdown tuple receiver, if we started it
381 : */
382 540128 : if (sendTuples)
383 433144 : dest->rShutdown(dest);
384 :
385 540128 : if (queryDesc->totaltime)
386 73910 : InstrStopNode(queryDesc->totaltime, estate->es_processed);
387 :
388 540128 : MemoryContextSwitchTo(oldcontext);
389 540128 : }
390 :
391 : /* ----------------------------------------------------------------
392 : * ExecutorFinish
393 : *
394 : * This routine must be called after the last ExecutorRun call.
395 : * It performs cleanup such as firing AFTER triggers. It is
396 : * separate from ExecutorEnd because EXPLAIN ANALYZE needs to
397 : * include these actions in the total runtime.
398 : *
399 : * We provide a function hook variable that lets loadable plugins
400 : * get control when ExecutorFinish is called. Such a plugin would
401 : * normally call standard_ExecutorFinish().
402 : *
403 : * ----------------------------------------------------------------
404 : */
405 : void
406 525630 : ExecutorFinish(QueryDesc *queryDesc)
407 : {
408 525630 : if (ExecutorFinish_hook)
409 101534 : (*ExecutorFinish_hook) (queryDesc);
410 : else
411 424096 : standard_ExecutorFinish(queryDesc);
412 524530 : }
413 :
414 : void
415 525630 : standard_ExecutorFinish(QueryDesc *queryDesc)
416 : {
417 : EState *estate;
418 : MemoryContext oldcontext;
419 :
420 : /* sanity checks */
421 : Assert(queryDesc != NULL);
422 :
423 525630 : estate = queryDesc->estate;
424 :
425 : Assert(estate != NULL);
426 : Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
427 :
428 : /* This should be run once and only once per Executor instance */
429 : Assert(!estate->es_finished);
430 :
431 : /* Switch into per-query memory context */
432 525630 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
433 :
434 : /* Allow instrumentation of Executor overall runtime */
435 525630 : if (queryDesc->totaltime)
436 73910 : InstrStartNode(queryDesc->totaltime);
437 :
438 : /* Run ModifyTable nodes to completion */
439 525630 : ExecPostprocessPlan(estate);
440 :
441 : /* Execute queued AFTER triggers, unless told not to */
442 525630 : if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
443 110532 : AfterTriggerEndQuery(estate);
444 :
445 524530 : if (queryDesc->totaltime)
446 73588 : InstrStopNode(queryDesc->totaltime, 0);
447 :
448 524530 : MemoryContextSwitchTo(oldcontext);
449 :
450 524530 : estate->es_finished = true;
451 524530 : }
452 :
453 : /* ----------------------------------------------------------------
454 : * ExecutorEnd
455 : *
456 : * This routine must be called at the end of execution of any
457 : * query plan
458 : *
459 : * We provide a function hook variable that lets loadable plugins
460 : * get control when ExecutorEnd is called. Such a plugin would
461 : * normally call standard_ExecutorEnd().
462 : *
463 : * ----------------------------------------------------------------
464 : */
465 : void
466 544988 : ExecutorEnd(QueryDesc *queryDesc)
467 : {
468 544988 : if (ExecutorEnd_hook)
469 106972 : (*ExecutorEnd_hook) (queryDesc);
470 : else
471 438016 : standard_ExecutorEnd(queryDesc);
472 544988 : }
473 :
474 : void
475 544988 : standard_ExecutorEnd(QueryDesc *queryDesc)
476 : {
477 : EState *estate;
478 : MemoryContext oldcontext;
479 :
480 : /* sanity checks */
481 : Assert(queryDesc != NULL);
482 :
483 544988 : estate = queryDesc->estate;
484 :
485 : Assert(estate != NULL);
486 :
487 544988 : if (estate->es_parallel_workers_to_launch > 0)
488 682 : pgstat_update_parallel_workers_stats((PgStat_Counter) estate->es_parallel_workers_to_launch,
489 682 : (PgStat_Counter) estate->es_parallel_workers_launched);
490 :
491 : /*
492 : * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
493 : * Assert is needed because ExecutorFinish is new as of 9.1, and callers
494 : * might forget to call it.
495 : */
496 : Assert(estate->es_finished ||
497 : (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
498 :
499 : /*
500 : * Switch into per-query memory context to run ExecEndPlan
501 : */
502 544988 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
503 :
504 544988 : ExecEndPlan(queryDesc->planstate, estate);
505 :
506 : /* do away with our snapshots */
507 544988 : UnregisterSnapshot(estate->es_snapshot);
508 544988 : UnregisterSnapshot(estate->es_crosscheck_snapshot);
509 :
510 : /*
511 : * Must switch out of context before destroying it
512 : */
513 544988 : MemoryContextSwitchTo(oldcontext);
514 :
515 : /*
516 : * Release EState and per-query memory context. This should release
517 : * everything the executor has allocated.
518 : */
519 544988 : FreeExecutorState(estate);
520 :
521 : /* Reset queryDesc fields that no longer point to anything */
522 544988 : queryDesc->tupDesc = NULL;
523 544988 : queryDesc->estate = NULL;
524 544988 : queryDesc->planstate = NULL;
525 544988 : queryDesc->totaltime = NULL;
526 544988 : }
527 :
528 : /* ----------------------------------------------------------------
529 : * ExecutorRewind
530 : *
531 : * This routine may be called on an open queryDesc to rewind it
532 : * to the start.
533 : * ----------------------------------------------------------------
534 : */
535 : void
536 108 : ExecutorRewind(QueryDesc *queryDesc)
537 : {
538 : EState *estate;
539 : MemoryContext oldcontext;
540 :
541 : /* sanity checks */
542 : Assert(queryDesc != NULL);
543 :
544 108 : estate = queryDesc->estate;
545 :
546 : Assert(estate != NULL);
547 :
548 : /* It's probably not sensible to rescan updating queries */
549 : Assert(queryDesc->operation == CMD_SELECT);
550 :
551 : /*
552 : * Switch into per-query memory context
553 : */
554 108 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
555 :
556 : /*
557 : * rescan plan
558 : */
559 108 : ExecReScan(queryDesc->planstate);
560 :
561 108 : MemoryContextSwitchTo(oldcontext);
562 108 : }
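/*
 * A sketch, under the same assumptions as the examples above: a caller
 * holding an open SELECT queryDesc (for example, a scrollable portal)
 * can rescan the query from the beginning with
 *
 *		ExecutorRewind(queryDesc);
 *		ExecutorRun(queryDesc, ForwardScanDirection, 0);
 */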
563 :
564 :
565 : /*
566 : * ExecCheckPermissions
567 : * Check access permissions of relations mentioned in a query
568 : *
569 : * Returns true if permissions are adequate. Otherwise, throws an appropriate
570 : * error if ereport_on_violation is true, or simply returns false.
571 : *
572 : * Note that this does NOT address row-level security policies (aka: RLS). If
573 : * rows will be returned to the user as a result of this permission check
574 : * passing, then RLS also needs to be consulted (and check_enable_rls()).
575 : *
576 : * See rewrite/rowsecurity.c.
577 : *
578 : * NB: rangeTable is no longer used by us, but kept around for the hooks that
579 : * might still want to look at the RTEs.
580 : */
581 : bool
582 585420 : ExecCheckPermissions(List *rangeTable, List *rteperminfos,
583 : bool ereport_on_violation)
584 : {
585 : ListCell *l;
586 585420 : bool result = true;
587 :
588 : #ifdef USE_ASSERT_CHECKING
589 : Bitmapset *indexset = NULL;
590 :
591 : /* Check that rteperminfos is consistent with rangeTable */
592 : foreach(l, rangeTable)
593 : {
594 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
595 :
596 : if (rte->perminfoindex != 0)
597 : {
598 : /* Sanity checks */
599 :
600 : /*
601 : * Only relation RTEs and subquery RTEs that were once relation
602 : * RTEs (views) have their perminfoindex set.
603 : */
604 : Assert(rte->rtekind == RTE_RELATION ||
605 : (rte->rtekind == RTE_SUBQUERY &&
606 : rte->relkind == RELKIND_VIEW));
607 :
608 : (void) getRTEPermissionInfo(rteperminfos, rte);
609 : /* Many-to-one mapping not allowed */
610 : Assert(!bms_is_member(rte->perminfoindex, indexset));
611 : indexset = bms_add_member(indexset, rte->perminfoindex);
612 : }
613 : }
614 :
615 : /* All rteperminfos are referenced */
616 : Assert(bms_num_members(indexset) == list_length(rteperminfos));
617 : #endif
618 :
619 1138226 : foreach(l, rteperminfos)
620 : {
621 554118 : RTEPermissionInfo *perminfo = lfirst_node(RTEPermissionInfo, l);
622 :
623 : Assert(OidIsValid(perminfo->relid));
624 554118 : result = ExecCheckOneRelPerms(perminfo);
625 554118 : if (!result)
626 : {
627 1312 : if (ereport_on_violation)
628 1300 : aclcheck_error(ACLCHECK_NO_PRIV,
629 1300 : get_relkind_objtype(get_rel_relkind(perminfo->relid)),
630 1300 : get_rel_name(perminfo->relid));
631 12 : return false;
632 : }
633 : }
634 :
635 584108 : if (ExecutorCheckPerms_hook)
636 12 : result = (*ExecutorCheckPerms_hook) (rangeTable, rteperminfos,
637 : ereport_on_violation);
638 584108 : return result;
639 : }
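/*
 * Sketch of an ExecutorCheckPerms_hook implementation (contrib/sepgsql
 * uses this hook for its mandatory access control checks; the names
 * my_exec_check_perms and my_label_allows are hypothetical):
 *
 *		static bool
 *		my_exec_check_perms(List *rangeTable, List *rteperminfos,
 *							bool ereport_on_violation)
 *		{
 *			ListCell   *l;
 *
 *			foreach(l, rteperminfos)
 *			{
 *				RTEPermissionInfo *perminfo = lfirst_node(RTEPermissionInfo, l);
 *
 *				if (!my_label_allows(perminfo->relid, perminfo->requiredPerms))
 *				{
 *					if (ereport_on_violation)
 *						aclcheck_error(ACLCHECK_NO_PRIV, OBJECT_TABLE,
 *									   get_rel_name(perminfo->relid));
 *					return false;
 *				}
 *			}
 *			return true;
 *		}
 *
 * The hook runs only after the built-in ACL checks have already passed,
 * so it can further restrict access but never grant it.
 */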
640 :
641 : /*
642 : * ExecCheckOneRelPerms
643 : * Check access permissions for a single relation.
644 : */
645 : bool
646 574156 : ExecCheckOneRelPerms(RTEPermissionInfo *perminfo)
647 : {
648 : AclMode requiredPerms;
649 : AclMode relPerms;
650 : AclMode remainingPerms;
651 : Oid userid;
652 574156 : Oid relOid = perminfo->relid;
653 :
654 574156 : requiredPerms = perminfo->requiredPerms;
655 : Assert(requiredPerms != 0);
656 :
657 : /*
658 : * userid to check as: current user unless we have a setuid indication.
659 : *
660 : * Note: GetUserId() is presently fast enough that there's no harm in
661 : * calling it separately for each relation. If that stops being true, we
662 : * could call it once in ExecCheckPermissions and pass the userid down
663 : * from there. But for now, no need for the extra clutter.
664 : */
665 1148312 : userid = OidIsValid(perminfo->checkAsUser) ?
666 574156 : perminfo->checkAsUser : GetUserId();
667 :
668 : /*
669 : * We must have *all* the requiredPerms bits, but some of the bits can be
670 : * satisfied from column-level rather than relation-level permissions.
671 : * First, remove any bits that are satisfied by relation permissions.
672 : */
673 574156 : relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
674 574156 : remainingPerms = requiredPerms & ~relPerms;
675 574156 : if (remainingPerms != 0)
676 : {
677 2870 : int col = -1;
678 :
679 : /*
680 : * If we lack any permissions that exist only as relation permissions,
681 : * we can fail straight away.
682 : */
683 2870 : if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
684 156 : return false;
685 :
686 : /*
687 : * Check to see if we have the needed privileges at column level.
688 : *
689 : * Note: failures just report a table-level error; it would be nicer
690 : * to report a column-level error if we have some but not all of the
691 : * column privileges.
692 : */
693 2714 : if (remainingPerms & ACL_SELECT)
694 : {
695 : /*
696 : * When the query doesn't explicitly reference any columns (for
697 : * example, SELECT COUNT(*) FROM table), allow the query if we
698 : * have SELECT on any column of the rel, as per SQL spec.
699 : */
700 1506 : if (bms_is_empty(perminfo->selectedCols))
701 : {
702 54 : if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
703 : ACLMASK_ANY) != ACLCHECK_OK)
704 12 : return false;
705 : }
706 :
707 2426 : while ((col = bms_next_member(perminfo->selectedCols, col)) >= 0)
708 : {
709 : /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
710 1902 : AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
711 :
712 1902 : if (attno == InvalidAttrNumber)
713 : {
714 : /* Whole-row reference, must have priv on all cols */
715 66 : if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
716 : ACLMASK_ALL) != ACLCHECK_OK)
717 42 : return false;
718 : }
719 : else
720 : {
721 1836 : if (pg_attribute_aclcheck(relOid, attno, userid,
722 : ACL_SELECT) != ACLCHECK_OK)
723 928 : return false;
724 : }
725 : }
726 : }
727 :
728 : /*
729 : * Basically the same for the mod columns, for both INSERT and UPDATE
730 : * privilege as specified by remainingPerms.
731 : */
732 1732 : if (remainingPerms & ACL_INSERT &&
733 308 : !ExecCheckPermissionsModified(relOid,
734 : userid,
735 : perminfo->insertedCols,
736 : ACL_INSERT))
737 176 : return false;
738 :
739 1556 : if (remainingPerms & ACL_UPDATE &&
740 1140 : !ExecCheckPermissionsModified(relOid,
741 : userid,
742 : perminfo->updatedCols,
743 : ACL_UPDATE))
744 384 : return false;
745 : }
746 572458 : return true;
747 : }
748 :
749 : /*
750 : * ExecCheckPermissionsModified
751 : * Check INSERT or UPDATE access permissions for a single relation (these
752 : * are processed uniformly).
753 : */
754 : static bool
755 1448 : ExecCheckPermissionsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
756 : AclMode requiredPerms)
757 : {
758 1448 : int col = -1;
759 :
760 : /*
761 : * When the query doesn't explicitly update any columns, allow the query
762 : * if we have permission on any column of the rel. This is to handle
763 : * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
764 : */
765 1448 : if (bms_is_empty(modifiedCols))
766 : {
767 48 : if (pg_attribute_aclcheck_all(relOid, userid, requiredPerms,
768 : ACLMASK_ANY) != ACLCHECK_OK)
769 48 : return false;
770 : }
771 :
772 2438 : while ((col = bms_next_member(modifiedCols, col)) >= 0)
773 : {
774 : /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
775 1550 : AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
776 :
777 1550 : if (attno == InvalidAttrNumber)
778 : {
779 : /* whole-row reference can't happen here */
780 0 : elog(ERROR, "whole-row update is not implemented");
781 : }
782 : else
783 : {
784 1550 : if (pg_attribute_aclcheck(relOid, attno, userid,
785 : requiredPerms) != ACLCHECK_OK)
786 512 : return false;
787 : }
788 : }
789 888 : return true;
790 : }
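/*
 * Worked example of the attribute-number offsetting used above: Bitmapsets
 * hold only non-negative integers, but attribute numbers can be negative
 * (system columns) or zero (whole-row reference), so column sets store
 * attno - FirstLowInvalidHeapAttributeNumber instead.  With
 * FirstLowInvalidHeapAttributeNumber being -7 (its current value in
 * access/sysattr.h), code that records a column "attno" does
 *
 *		cols = bms_add_member(cols,
 *							  attno - FirstLowInvalidHeapAttributeNumber);
 *
 * which stores user column 1 as bit 8 and a whole-row reference (attno 0)
 * as bit 7; the loops above invert the mapping with
 * attno = col + FirstLowInvalidHeapAttributeNumber.
 */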
791 :
792 : /*
793 : * Check that the query does not imply any writes to non-temp tables;
794 : * unless we're in parallel mode, in which case don't even allow writes
795 : * to temp tables.
796 : *
797 : * Note: in a Hot Standby this would need to reject writes to temp
798 : * tables just as we do in parallel mode; but an HS standby can't have created
799 : * any temp tables in the first place, so no need to check that.
800 : */
801 : static void
802 62026 : ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
803 : {
804 : ListCell *l;
805 :
806 : /*
807 : * Fail if write permissions are requested in parallel mode for table
808 : * (temp or non-temp), otherwise fail for any non-temp table.
809 : */
810 162618 : foreach(l, plannedstmt->permInfos)
811 : {
812 100608 : RTEPermissionInfo *perminfo = lfirst_node(RTEPermissionInfo, l);
813 :
814 100608 : if ((perminfo->requiredPerms & (~ACL_SELECT)) == 0)
815 100580 : continue;
816 :
817 28 : if (isTempNamespace(get_rel_namespace(perminfo->relid)))
818 12 : continue;
819 :
820 16 : PreventCommandIfReadOnly(CreateCommandName((Node *) plannedstmt));
821 : }
822 :
823 62010 : if (plannedstmt->commandType != CMD_SELECT || plannedstmt->hasModifyingCTE)
824 12 : PreventCommandIfParallelMode(CreateCommandName((Node *) plannedstmt));
825 62010 : }
826 :
827 :
828 : /* ----------------------------------------------------------------
829 : * InitPlan
830 : *
831 : * Initializes the query plan: opens files, allocates storage,
832 : * and starts up the rule manager.
833 : * ----------------------------------------------------------------
834 : */
835 : static void
836 573442 : InitPlan(QueryDesc *queryDesc, int eflags)
837 : {
838 573442 : CmdType operation = queryDesc->operation;
839 573442 : PlannedStmt *plannedstmt = queryDesc->plannedstmt;
840 573442 : Plan *plan = plannedstmt->planTree;
841 573442 : List *rangeTable = plannedstmt->rtable;
842 573442 : EState *estate = queryDesc->estate;
843 : PlanState *planstate;
844 : TupleDesc tupType;
845 : ListCell *l;
846 : int i;
847 :
848 : /*
849 : * Do permissions checks
850 : */
851 573442 : ExecCheckPermissions(rangeTable, plannedstmt->permInfos, true);
852 :
853 : /*
854 : * initialize the node's execution state
855 : */
856 572226 : ExecInitRangeTable(estate, rangeTable, plannedstmt->permInfos,
857 572226 : bms_copy(plannedstmt->unprunableRelids));
858 :
859 572226 : estate->es_plannedstmt = plannedstmt;
860 572226 : estate->es_part_prune_infos = plannedstmt->partPruneInfos;
861 :
862 : /*
863 : * Perform runtime "initial" pruning to identify which child subplans,
864 : * corresponding to the children of plan nodes that contain
865 : * PartitionPruneInfo such as Append, will not be executed. The results,
866 : * which are bitmapsets of indexes of the child subplans that will be
867 : * executed, are saved in es_part_prune_results. These results correspond
868 : * to each PartitionPruneInfo entry, and the es_part_prune_results list is
869 : * parallel to es_part_prune_infos.
870 : */
871 572226 : ExecDoInitialPruning(estate);
872 :
873 : /*
874 : * Next, build the ExecRowMark array from the PlanRowMark(s), if any.
875 : */
876 572226 : if (plannedstmt->rowMarks)
877 : {
878 10008 : estate->es_rowmarks = (ExecRowMark **)
879 10008 : palloc0(estate->es_range_table_size * sizeof(ExecRowMark *));
880 23380 : foreach(l, plannedstmt->rowMarks)
881 : {
882 13378 : PlanRowMark *rc = (PlanRowMark *) lfirst(l);
883 : Oid relid;
884 : Relation relation;
885 : ExecRowMark *erm;
886 :
887 : /*
888 : * Ignore "parent" rowmarks, because they are irrelevant at
889 : * runtime. Also ignore the rowmarks belonging to child tables
890 : * that have been pruned in ExecDoInitialPruning().
891 : */
892 13378 : if (rc->isParent ||
893 11468 : !bms_is_member(rc->rti, estate->es_unpruned_relids))
894 2438 : continue;
895 :
896 : /* get relation's OID (will produce InvalidOid if subquery) */
897 10940 : relid = exec_rt_fetch(rc->rti, estate)->relid;
898 :
899 : /* open relation, if we need to access it for this mark type */
900 10940 : switch (rc->markType)
901 : {
902 10616 : case ROW_MARK_EXCLUSIVE:
903 : case ROW_MARK_NOKEYEXCLUSIVE:
904 : case ROW_MARK_SHARE:
905 : case ROW_MARK_KEYSHARE:
906 : case ROW_MARK_REFERENCE:
907 10616 : relation = ExecGetRangeTableRelation(estate, rc->rti, false);
908 10616 : break;
909 324 : case ROW_MARK_COPY:
910 : /* no physical table access is required */
911 324 : relation = NULL;
912 324 : break;
913 0 : default:
914 0 : elog(ERROR, "unrecognized markType: %d", rc->markType);
915 : relation = NULL; /* keep compiler quiet */
916 : break;
917 : }
918 :
919 : /* Check that relation is a legal target for marking */
920 10940 : if (relation)
921 10616 : CheckValidRowMarkRel(relation, rc->markType);
922 :
923 10934 : erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
924 10934 : erm->relation = relation;
925 10934 : erm->relid = relid;
926 10934 : erm->rti = rc->rti;
927 10934 : erm->prti = rc->prti;
928 10934 : erm->rowmarkId = rc->rowmarkId;
929 10934 : erm->markType = rc->markType;
930 10934 : erm->strength = rc->strength;
931 10934 : erm->waitPolicy = rc->waitPolicy;
932 10934 : erm->ermActive = false;
933 10934 : ItemPointerSetInvalid(&(erm->curCtid));
934 10934 : erm->ermExtra = NULL;
935 :
936 : Assert(erm->rti > 0 && erm->rti <= estate->es_range_table_size &&
937 : estate->es_rowmarks[erm->rti - 1] == NULL);
938 :
939 10934 : estate->es_rowmarks[erm->rti - 1] = erm;
940 : }
941 : }
942 :
943 : /*
944 : * Initialize the executor's tuple table to empty.
945 : */
946 572220 : estate->es_tupleTable = NIL;
947 :
948 : /* signal that this EState is not used for EPQ */
949 572220 : estate->es_epq_active = NULL;
950 :
951 : /*
952 : * Initialize private state information for each SubPlan. We must do this
953 : * before running ExecInitNode on the main query tree, since
954 : * ExecInitSubPlan expects to be able to find these entries.
955 : */
956 : Assert(estate->es_subplanstates == NIL);
957 572220 : i = 1; /* subplan indices count from 1 */
958 616774 : foreach(l, plannedstmt->subplans)
959 : {
960 44554 : Plan *subplan = (Plan *) lfirst(l);
961 : PlanState *subplanstate;
962 : int sp_eflags;
963 :
964 : /*
965 : * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
966 : * it is a parameterless subplan (not initplan), we suggest that it be
967 : * prepared to handle REWIND efficiently; otherwise there is no need.
968 : */
969 44554 : sp_eflags = eflags
970 : & ~(EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK);
971 44554 : if (bms_is_member(i, plannedstmt->rewindPlanIDs))
972 42 : sp_eflags |= EXEC_FLAG_REWIND;
973 :
974 44554 : subplanstate = ExecInitNode(subplan, estate, sp_eflags);
975 :
976 44554 : estate->es_subplanstates = lappend(estate->es_subplanstates,
977 : subplanstate);
978 :
979 44554 : i++;
980 : }
981 :
982 : /*
983 : * Initialize the private state information for all the nodes in the query
984 : * tree. This opens files, allocates storage and leaves us ready to start
985 : * processing tuples.
986 : */
987 572220 : planstate = ExecInitNode(plan, estate, eflags);
988 :
989 : /*
990 : * Get the tuple descriptor describing the type of tuples to return.
991 : */
992 571788 : tupType = ExecGetResultType(planstate);
993 :
994 : /*
995 : * Initialize the junk filter if needed. SELECT queries need a filter if
996 : * there are any junk attrs in the top-level tlist.
997 : */
998 571788 : if (operation == CMD_SELECT)
999 : {
1000 456510 : bool junk_filter_needed = false;
1001 : ListCell *tlist;
1002 :
1003 1685822 : foreach(tlist, plan->targetlist)
1004 : {
1005 1254446 : TargetEntry *tle = (TargetEntry *) lfirst(tlist);
1006 :
1007 1254446 : if (tle->resjunk)
1008 : {
1009 25134 : junk_filter_needed = true;
1010 25134 : break;
1011 : }
1012 : }
1013 :
1014 456510 : if (junk_filter_needed)
1015 : {
1016 : JunkFilter *j;
1017 : TupleTableSlot *slot;
1018 :
1019 25134 : slot = ExecInitExtraTupleSlot(estate, NULL, &TTSOpsVirtual);
1020 25134 : j = ExecInitJunkFilter(planstate->plan->targetlist,
1021 : slot);
1022 25134 : estate->es_junkFilter = j;
1023 :
1024 : /* Want to return the cleaned tuple type */
1025 25134 : tupType = j->jf_cleanTupType;
1026 : }
1027 : }
1028 :
1029 571788 : queryDesc->tupDesc = tupType;
1030 571788 : queryDesc->planstate = planstate;
1031 571788 : }
1032 :
1033 : /*
1034 : * Check that a proposed result relation is a legal target for the operation
1035 : *
1036 : * Generally the parser and/or planner should have noticed any such mistake
1037 : * already, but let's make sure.
1038 : *
1039 : * For MERGE, mergeActions is the list of actions that may be performed. The
1040 : * result relation is required to support every action, regardless of whether
1041 : * or not they are all executed.
1042 : *
1043 : * Note: when changing this function, you probably also need to look at
1044 : * CheckValidRowMarkRel.
1045 : */
1046 : void
1047 126940 : CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation,
1048 : List *mergeActions)
1049 : {
1050 126940 : Relation resultRel = resultRelInfo->ri_RelationDesc;
1051 : FdwRoutine *fdwroutine;
1052 :
1053 : /* Expect a fully-formed ResultRelInfo from InitResultRelInfo(). */
1054 : Assert(resultRelInfo->ri_needLockTagTuple ==
1055 : IsInplaceUpdateRelation(resultRel));
1056 :
1057 126940 : switch (resultRel->rd_rel->relkind)
1058 : {
1059 125706 : case RELKIND_RELATION:
1060 : case RELKIND_PARTITIONED_TABLE:
1061 125706 : CheckCmdReplicaIdentity(resultRel, operation);
1062 125398 : break;
1063 0 : case RELKIND_SEQUENCE:
1064 0 : ereport(ERROR,
1065 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1066 : errmsg("cannot change sequence \"%s\"",
1067 : RelationGetRelationName(resultRel))));
1068 : break;
1069 0 : case RELKIND_TOASTVALUE:
1070 0 : ereport(ERROR,
1071 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1072 : errmsg("cannot change TOAST relation \"%s\"",
1073 : RelationGetRelationName(resultRel))));
1074 : break;
1075 420 : case RELKIND_VIEW:
1076 :
1077 : /*
1078 : * Okay only if there's a suitable INSTEAD OF trigger. Otherwise,
1079 : * complain, but omit errdetail because we haven't got the
1080 : * information handy (and given that it really shouldn't happen,
1081 : * it's not worth great exertion to get).
1082 : */
1083 420 : if (!view_has_instead_trigger(resultRel, operation, mergeActions))
1084 0 : error_view_not_updatable(resultRel, operation, mergeActions,
1085 : NULL);
1086 420 : break;
1087 120 : case RELKIND_MATVIEW:
1088 120 : if (!MatViewIncrementalMaintenanceIsEnabled())
1089 0 : ereport(ERROR,
1090 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1091 : errmsg("cannot change materialized view \"%s\"",
1092 : RelationGetRelationName(resultRel))));
1093 120 : break;
1094 694 : case RELKIND_FOREIGN_TABLE:
1095 : /* Okay only if the FDW supports it */
1096 694 : fdwroutine = resultRelInfo->ri_FdwRoutine;
1097 : switch (operation)
1098 : {
1099 314 : case CMD_INSERT:
1100 314 : if (fdwroutine->ExecForeignInsert == NULL)
1101 10 : ereport(ERROR,
1102 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1103 : errmsg("cannot insert into foreign table \"%s\"",
1104 : RelationGetRelationName(resultRel))));
1105 304 : if (fdwroutine->IsForeignRelUpdatable != NULL &&
1106 304 : (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
1107 0 : ereport(ERROR,
1108 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1109 : errmsg("foreign table \"%s\" does not allow inserts",
1110 : RelationGetRelationName(resultRel))));
1111 304 : break;
1112 214 : case CMD_UPDATE:
1113 214 : if (fdwroutine->ExecForeignUpdate == NULL)
1114 4 : ereport(ERROR,
1115 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1116 : errmsg("cannot update foreign table \"%s\"",
1117 : RelationGetRelationName(resultRel))));
1118 210 : if (fdwroutine->IsForeignRelUpdatable != NULL &&
1119 210 : (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
1120 0 : ereport(ERROR,
1121 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1122 : errmsg("foreign table \"%s\" does not allow updates",
1123 : RelationGetRelationName(resultRel))));
1124 210 : break;
1125 166 : case CMD_DELETE:
1126 166 : if (fdwroutine->ExecForeignDelete == NULL)
1127 4 : ereport(ERROR,
1128 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1129 : errmsg("cannot delete from foreign table \"%s\"",
1130 : RelationGetRelationName(resultRel))));
1131 162 : if (fdwroutine->IsForeignRelUpdatable != NULL &&
1132 162 : (fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
1133 0 : ereport(ERROR,
1134 : (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
1135 : errmsg("foreign table \"%s\" does not allow deletes",
1136 : RelationGetRelationName(resultRel))));
1137 162 : break;
1138 0 : default:
1139 0 : elog(ERROR, "unrecognized CmdType: %d", (int) operation);
1140 : break;
1141 : }
1142 676 : break;
1143 0 : default:
1144 0 : ereport(ERROR,
1145 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1146 : errmsg("cannot change relation \"%s\"",
1147 : RelationGetRelationName(resultRel))));
1148 : break;
1149 : }
1150 126614 : }
1151 :
1152 : /*
1153 : * Check that a proposed rowmark target relation is a legal target
1154 : *
1155 : * In most cases parser and/or planner should have noticed this already, but
1156 : * they don't cover all cases.
1157 : */
1158 : static void
1159 10616 : CheckValidRowMarkRel(Relation rel, RowMarkType markType)
1160 : {
1161 : FdwRoutine *fdwroutine;
1162 :
1163 10616 : switch (rel->rd_rel->relkind)
1164 : {
1165 10604 : case RELKIND_RELATION:
1166 : case RELKIND_PARTITIONED_TABLE:
1167 : /* OK */
1168 10604 : break;
1169 0 : case RELKIND_SEQUENCE:
1170 : /* Must disallow this because we don't vacuum sequences */
1171 0 : ereport(ERROR,
1172 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1173 : errmsg("cannot lock rows in sequence \"%s\"",
1174 : RelationGetRelationName(rel))));
1175 : break;
1176 0 : case RELKIND_TOASTVALUE:
1177 : /* We could allow this, but there seems no good reason to */
1178 0 : ereport(ERROR,
1179 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1180 : errmsg("cannot lock rows in TOAST relation \"%s\"",
1181 : RelationGetRelationName(rel))));
1182 : break;
1183 0 : case RELKIND_VIEW:
1184 : /* Should not get here; planner should have expanded the view */
1185 0 : ereport(ERROR,
1186 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1187 : errmsg("cannot lock rows in view \"%s\"",
1188 : RelationGetRelationName(rel))));
1189 : break;
1190 12 : case RELKIND_MATVIEW:
1191 : /* Allow referencing a matview, but not actual locking clauses */
1192 12 : if (markType != ROW_MARK_REFERENCE)
1193 6 : ereport(ERROR,
1194 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1195 : errmsg("cannot lock rows in materialized view \"%s\"",
1196 : RelationGetRelationName(rel))));
1197 6 : break;
1198 0 : case RELKIND_FOREIGN_TABLE:
1199 : /* Okay only if the FDW supports it */
1200 0 : fdwroutine = GetFdwRoutineForRelation(rel, false);
1201 0 : if (fdwroutine->RefetchForeignRow == NULL)
1202 0 : ereport(ERROR,
1203 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1204 : errmsg("cannot lock rows in foreign table \"%s\"",
1205 : RelationGetRelationName(rel))));
1206 0 : break;
1207 0 : default:
1208 0 : ereport(ERROR,
1209 : (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1210 : errmsg("cannot lock rows in relation \"%s\"",
1211 : RelationGetRelationName(rel))));
1212 : break;
1213 : }
1214 10610 : }
1215 :
1216 : /*
1217 : * Initialize ResultRelInfo data for one result relation
1218 : *
1219 : * Caution: before Postgres 9.1, this function included the relkind checking
1220 : * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
1221 : * appropriate. Be sure callers cover those needs.
1222 : */
1223 : void
1224 432108 : InitResultRelInfo(ResultRelInfo *resultRelInfo,
1225 : Relation resultRelationDesc,
1226 : Index resultRelationIndex,
1227 : ResultRelInfo *partition_root_rri,
1228 : int instrument_options)
1229 : {
1230 22037508 : MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
1231 432108 : resultRelInfo->type = T_ResultRelInfo;
1232 432108 : resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
1233 432108 : resultRelInfo->ri_RelationDesc = resultRelationDesc;
1234 432108 : resultRelInfo->ri_NumIndices = 0;
1235 432108 : resultRelInfo->ri_IndexRelationDescs = NULL;
1236 432108 : resultRelInfo->ri_IndexRelationInfo = NULL;
1237 432108 : resultRelInfo->ri_needLockTagTuple =
1238 432108 : IsInplaceUpdateRelation(resultRelationDesc);
1239 : /* make a copy so as not to depend on relcache info not changing... */
1240 432108 : resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
1241 432108 : if (resultRelInfo->ri_TrigDesc)
1242 : {
1243 17192 : int n = resultRelInfo->ri_TrigDesc->numtriggers;
1244 :
1245 17192 : resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
1246 17192 : palloc0(n * sizeof(FmgrInfo));
1247 17192 : resultRelInfo->ri_TrigWhenExprs = (ExprState **)
1248 17192 : palloc0(n * sizeof(ExprState *));
1249 17192 : if (instrument_options)
1250 0 : resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options, false);
1251 : }
1252 : else
1253 : {
1254 414916 : resultRelInfo->ri_TrigFunctions = NULL;
1255 414916 : resultRelInfo->ri_TrigWhenExprs = NULL;
1256 414916 : resultRelInfo->ri_TrigInstrument = NULL;
1257 : }
1258 432108 : if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
1259 716 : resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
1260 : else
1261 431392 : resultRelInfo->ri_FdwRoutine = NULL;
1262 :
1263 : /* The following fields are set later if needed */
1264 432108 : resultRelInfo->ri_RowIdAttNo = 0;
1265 432108 : resultRelInfo->ri_extraUpdatedCols = NULL;
1266 432108 : resultRelInfo->ri_projectNew = NULL;
1267 432108 : resultRelInfo->ri_newTupleSlot = NULL;
1268 432108 : resultRelInfo->ri_oldTupleSlot = NULL;
1269 432108 : resultRelInfo->ri_projectNewInfoValid = false;
1270 432108 : resultRelInfo->ri_FdwState = NULL;
1271 432108 : resultRelInfo->ri_usesFdwDirectModify = false;
1272 432108 : resultRelInfo->ri_CheckConstraintExprs = NULL;
1273 432108 : resultRelInfo->ri_GenVirtualNotNullConstraintExprs = NULL;
1274 432108 : resultRelInfo->ri_GeneratedExprsI = NULL;
1275 432108 : resultRelInfo->ri_GeneratedExprsU = NULL;
1276 432108 : resultRelInfo->ri_projectReturning = NULL;
1277 432108 : resultRelInfo->ri_onConflictArbiterIndexes = NIL;
1278 432108 : resultRelInfo->ri_onConflict = NULL;
1279 432108 : resultRelInfo->ri_ReturningSlot = NULL;
1280 432108 : resultRelInfo->ri_TrigOldSlot = NULL;
1281 432108 : resultRelInfo->ri_TrigNewSlot = NULL;
1282 432108 : resultRelInfo->ri_AllNullSlot = NULL;
1283 432108 : resultRelInfo->ri_MergeActions[MERGE_WHEN_MATCHED] = NIL;
1284 432108 : resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_SOURCE] = NIL;
1285 432108 : resultRelInfo->ri_MergeActions[MERGE_WHEN_NOT_MATCHED_BY_TARGET] = NIL;
1286 432108 : resultRelInfo->ri_MergeJoinCondition = NULL;
1287 :
1288 : /*
1289 : * Only ExecInitPartitionInfo() and ExecInitPartitionDispatchInfo() pass
1290 : * non-NULL partition_root_rri. For child relations that are part of the
1291 : * initial query rather than being dynamically added by tuple routing,
1292 : * this field is filled in ExecInitModifyTable().
1293 : */
1294 432108 : resultRelInfo->ri_RootResultRelInfo = partition_root_rri;
1295 : /* Set by ExecGetRootToChildMap */
1296 432108 : resultRelInfo->ri_RootToChildMap = NULL;
1297 432108 : resultRelInfo->ri_RootToChildMapValid = false;
1298 : /* Set by ExecInitRoutingInfo */
1299 432108 : resultRelInfo->ri_PartitionTupleSlot = NULL;
1300 432108 : resultRelInfo->ri_ChildToRootMap = NULL;
1301 432108 : resultRelInfo->ri_ChildToRootMapValid = false;
1302 432108 : resultRelInfo->ri_CopyMultiInsertBuffer = NULL;
1303 432108 : }
1304 :
1305 : /*
1306 : * ExecGetTriggerResultRel
1307 : * Get a ResultRelInfo for a trigger target relation.
1308 : *
1309 : * Most of the time, triggers are fired on one of the result relations of the
1310 : * query, and so we can just return a member of the es_result_relations array,
1311 : * or the es_tuple_routing_result_relations list (if any). (Note: in self-join
1312 : * situations there might be multiple members with the same OID; if so it
1313 : * doesn't matter which one we pick.)
1314 : *
1315 : * However, it is sometimes necessary to fire triggers on other relations;
1316 : * this happens mainly when an RI update trigger queues additional triggers
1317 : * on other relations, which will be processed in the context of the outer
1318 : * query. For efficiency's sake, we want to have a ResultRelInfo for those
1319 : * triggers too; that can avoid repeated re-opening of the relation. (It
1320 : * also provides a way for EXPLAIN ANALYZE to report the runtimes of such
1321 : * triggers.) So we make additional ResultRelInfo's as needed, and save them
1322 : * in es_trig_target_relations.
1323 : */
1324 : ResultRelInfo *
1325 8154 : ExecGetTriggerResultRel(EState *estate, Oid relid,
1326 : ResultRelInfo *rootRelInfo)
1327 : {
1328 : ResultRelInfo *rInfo;
1329 : ListCell *l;
1330 : Relation rel;
1331 : MemoryContext oldcontext;
1332 :
1333 : /* Search through the query result relations */
1334 10394 : foreach(l, estate->es_opened_result_relations)
1335 : {
1336 8940 : rInfo = lfirst(l);
1337 8940 : if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1338 6700 : return rInfo;
1339 : }
1340 :
1341 : /*
1342 : * Search through the result relations that were created during tuple
1343 : * routing, if any.
1344 : */
1345 1668 : foreach(l, estate->es_tuple_routing_result_relations)
1346 : {
1347 910 : rInfo = (ResultRelInfo *) lfirst(l);
1348 910 : if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1349 696 : return rInfo;
1350 : }
1351 :
1352 : /* Nope, but maybe we already made an extra ResultRelInfo for it */
1353 1100 : foreach(l, estate->es_trig_target_relations)
1354 : {
1355 372 : rInfo = (ResultRelInfo *) lfirst(l);
1356 372 : if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1357 30 : return rInfo;
1358 : }
1359 : /* Nope, so we need a new one */
1360 :
1361 : /*
1362 : * Open the target relation's relcache entry. We assume that an
1363 : * appropriate lock is still held by the backend from whenever the trigger
1364 : * event got queued, so we need not take a new lock here. Also, we need not
1365 : * recheck the relkind, so no need for CheckValidResultRel.
1366 : */
1367 728 : rel = table_open(relid, NoLock);
1368 :
1369 : /*
1370 : * Make the new entry in the right context.
1371 : */
1372 728 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1373 728 : rInfo = makeNode(ResultRelInfo);
1374 728 : InitResultRelInfo(rInfo,
1375 : rel,
1376 : 0, /* dummy rangetable index */
1377 : rootRelInfo,
1378 : estate->es_instrument);
1379 728 : estate->es_trig_target_relations =
1380 728 : lappend(estate->es_trig_target_relations, rInfo);
1381 728 : MemoryContextSwitchTo(oldcontext);
1382 :
1383 : /*
1384 : * Currently, we don't need any index information in ResultRelInfos used
1385 : * only for triggers, so no need to call ExecOpenIndices.
1386 : */
1387 :
1388 728 : return rInfo;
1389 : }
1390 :
1391 : /*
1392 : * Return the ancestor relations of a given leaf partition result relation
1393 : * up to and including the query's root target relation.
1394 : *
1395 : * These work much like the ones opened by ExecGetTriggerResultRel, except
1396 : * that we need to keep them in a separate list.
1397 : *
1398 : * These are closed by ExecCloseResultRelations.
1399 : */
1400 : List *
1401 300 : ExecGetAncestorResultRels(EState *estate, ResultRelInfo *resultRelInfo)
1402 : {
1403 300 : ResultRelInfo *rootRelInfo = resultRelInfo->ri_RootResultRelInfo;
1404 300 : Relation partRel = resultRelInfo->ri_RelationDesc;
1405 : Oid rootRelOid;
1406 :
1407 300 : if (!partRel->rd_rel->relispartition)
1408 0 : elog(ERROR, "cannot find ancestors of a non-partition result relation");
1409 : Assert(rootRelInfo != NULL);
1410 300 : rootRelOid = RelationGetRelid(rootRelInfo->ri_RelationDesc);
1411 300 : if (resultRelInfo->ri_ancestorResultRels == NIL)
1412 : {
1413 : ListCell *lc;
1414 234 : List *oids = get_partition_ancestors(RelationGetRelid(partRel));
1415 234 : List *ancResultRels = NIL;
1416 :
1417 300 : foreach(lc, oids)
1418 : {
1419 300 : Oid ancOid = lfirst_oid(lc);
1420 : Relation ancRel;
1421 : ResultRelInfo *rInfo;
1422 :
1423 : /*
1424 : * Ignore the root ancestor here, and use ri_RootResultRelInfo
1425 : * (below) for it instead. Also, we stop climbing up the
1426 : * hierarchy when we find the table that was mentioned in the
1427 : * query.
1428 : */
1429 300 : if (ancOid == rootRelOid)
1430 234 : break;
1431 :
1432 : /*
1433 : * All ancestors up to the root target relation must have been
1434 : * locked by the planner or AcquireExecutorLocks().
1435 : */
1436 66 : ancRel = table_open(ancOid, NoLock);
1437 66 : rInfo = makeNode(ResultRelInfo);
1438 :
1439 : /* dummy rangetable index */
1440 66 : InitResultRelInfo(rInfo, ancRel, 0, NULL,
1441 : estate->es_instrument);
1442 66 : ancResultRels = lappend(ancResultRels, rInfo);
1443 : }
1444 234 : ancResultRels = lappend(ancResultRels, rootRelInfo);
1445 234 : resultRelInfo->ri_ancestorResultRels = ancResultRels;
1446 : }
1447 :
1448 : /* We must have found some ancestor */
1449 : Assert(resultRelInfo->ri_ancestorResultRels != NIL);
1450 :
1451 300 : return resultRelInfo->ri_ancestorResultRels;
1452 : }
1453 :
1454 : /* ----------------------------------------------------------------
1455 : * ExecPostprocessPlan
1456 : *
1457 : * Give plan nodes a final chance to execute before shutdown
1458 : * ----------------------------------------------------------------
1459 : */
1460 : static void
1461 525630 : ExecPostprocessPlan(EState *estate)
1462 : {
1463 : ListCell *lc;
1464 :
1465 : /*
1466 : * Make sure nodes run forward.
1467 : */
1468 525630 : estate->es_direction = ForwardScanDirection;
1469 :
1470 : /*
1471 : * Run any secondary ModifyTable nodes to completion, in case the main
1472 : * query did not fetch all rows from them. (We do this to ensure that
1473 : * such nodes have predictable results.)
1474 : */
1475 526476 : foreach(lc, estate->es_auxmodifytables)
1476 : {
1477 846 : PlanState *ps = (PlanState *) lfirst(lc);
1478 :
1479 : for (;;)
1480 138 : {
1481 : TupleTableSlot *slot;
1482 :
1483 : /* Reset the per-output-tuple exprcontext each time */
1484 984 : ResetPerTupleExprContext(estate);
1485 :
1486 984 : slot = ExecProcNode(ps);
1487 :
1488 984 : if (TupIsNull(slot))
1489 : break;
1490 : }
1491 : }
1492 525630 : }
1493 :
1494 : /* ----------------------------------------------------------------
1495 : * ExecEndPlan
1496 : *
1497 : * Cleans up the query plan -- closes files and frees up storage
1498 : *
1499 : * NOTE: we are no longer very worried about freeing storage per se
1500 : * in this code; FreeExecutorState should be guaranteed to release all
1501 : * memory that needs to be released. What we are worried about doing
1502 : * is closing relations and dropping buffer pins. Thus, for example,
1503 : * tuple tables must be cleared or dropped to ensure pins are released.
1504 : * ----------------------------------------------------------------
1505 : */
1506 : static void
1507 544988 : ExecEndPlan(PlanState *planstate, EState *estate)
1508 : {
1509 : ListCell *l;
1510 :
1511 : /*
1512 : * shut down the node-type-specific query processing
1513 : */
1514 544988 : ExecEndNode(planstate);
1515 :
1516 : /*
1517 : * for subplans too
1518 : */
1519 588954 : foreach(l, estate->es_subplanstates)
1520 : {
1521 43966 : PlanState *subplanstate = (PlanState *) lfirst(l);
1522 :
1523 43966 : ExecEndNode(subplanstate);
1524 : }
1525 :
1526 : /*
1527 : * destroy the executor's tuple table. Actually we only care about
1528 : * releasing buffer pins and tupdesc refcounts; there's no need to pfree
1529 : * the TupleTableSlots, since the containing memory context is about to go
1530 : * away anyway.
1531 : */
1532 544988 : ExecResetTupleTable(estate->es_tupleTable, false);
1533 :
1534 : /*
1535 : * Close any Relations that have been opened for range table entries or
1536 : * result relations.
1537 : */
1538 544988 : ExecCloseResultRelations(estate);
1539 544988 : ExecCloseRangeTableRelations(estate);
1540 544988 : }
1541 :
1542 : /*
1543 : * Close any relations that have been opened for ResultRelInfos.
1544 : */
1545 : void
1546 546976 : ExecCloseResultRelations(EState *estate)
1547 : {
1548 : ListCell *l;
1549 :
1550 : /*
1551 : * close indexes of result relation(s) if any. (Rels themselves are
1552 : * closed in ExecCloseRangeTableRelations())
1553 : *
1554 : * In addition, close the stub RTs that may be in each resultrel's
1555 : * ri_ancestorResultRels.
1556 : */
1557 664196 : foreach(l, estate->es_opened_result_relations)
1558 : {
1559 117220 : ResultRelInfo *resultRelInfo = lfirst(l);
1560 : ListCell *lc;
1561 :
1562 117220 : ExecCloseIndices(resultRelInfo);
1563 117472 : foreach(lc, resultRelInfo->ri_ancestorResultRels)
1564 : {
1565 252 : ResultRelInfo *rInfo = lfirst(lc);
1566 :
1567 : /*
1568 : * Ancestors with RTI > 0 (should only be the root ancestor) are
1569 : * closed by ExecCloseRangeTableRelations.
1570 : */
1571 252 : if (rInfo->ri_RangeTableIndex > 0)
1572 204 : continue;
1573 :
1574 48 : table_close(rInfo->ri_RelationDesc, NoLock);
1575 : }
1576 : }
1577 :
1578 : /* Close any relations that have been opened by ExecGetTriggerResultRel(). */
1579 547486 : foreach(l, estate->es_trig_target_relations)
1580 : {
1581 510 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
1582 :
1583 : /*
1584 : * Assert this is a "dummy" ResultRelInfo, see above. Otherwise we
1585 : * might be issuing a duplicate close against a Relation opened by
1586 : * ExecGetRangeTableRelation.
1587 : */
1588 : Assert(resultRelInfo->ri_RangeTableIndex == 0);
1589 :
1590 : /*
1591 : * Since ExecGetTriggerResultRel doesn't call ExecOpenIndices for
1592 : * these rels, we needn't call ExecCloseIndices either.
1593 : */
1594 : Assert(resultRelInfo->ri_NumIndices == 0);
1595 :
1596 510 : table_close(resultRelInfo->ri_RelationDesc, NoLock);
1597 : }
1598 546976 : }
1599 :
1600 : /*
1601 : * Close all relations opened by ExecGetRangeTableRelation().
1602 : *
1603 : * We do not release any locks we might hold on those rels.
1604 : */
1605 : void
1606 546576 : ExecCloseRangeTableRelations(EState *estate)
1607 : {
1608 : int i;
1609 :
1610 1608822 : for (i = 0; i < estate->es_range_table_size; i++)
1611 : {
1612 1062246 : if (estate->es_relations[i])
1613 514700 : table_close(estate->es_relations[i], NoLock);
1614 : }
1615 546576 : }
1616 :
1617 : /* ----------------------------------------------------------------
1618 : * ExecutePlan
1619 : *
1620 : * Processes the query plan until we have retrieved 'numberTuples' tuples,
1621 : * moving in the specified direction.
1622 : *
1623 : * Runs to completion if numberTuples is 0
1624 : * ----------------------------------------------------------------
1625 : */
1626 : static void
1627 562496 : ExecutePlan(QueryDesc *queryDesc,
1628 : CmdType operation,
1629 : bool sendTuples,
1630 : uint64 numberTuples,
1631 : ScanDirection direction,
1632 : DestReceiver *dest)
1633 : {
1634 562496 : EState *estate = queryDesc->estate;
1635 562496 : PlanState *planstate = queryDesc->planstate;
1636 : bool use_parallel_mode;
1637 : TupleTableSlot *slot;
1638 : uint64 current_tuple_count;
1639 :
1640 : /*
1641 : * initialize local variables
1642 : */
1643 562496 : current_tuple_count = 0;
1644 :
1645 : /*
1646 : * Set the direction.
1647 : */
1648 562496 : estate->es_direction = direction;
1649 :
1650 : /*
1651 : * Set up parallel mode if appropriate.
1652 : *
1653 : * Parallel mode only supports complete execution of a plan. If we've
1654 : * already partially executed it, or if the caller asks us to exit early,
1655 : * we must force the plan to run without parallelism.
1656 : */
1657 562496 : if (queryDesc->already_executed || numberTuples != 0)
1658 128226 : use_parallel_mode = false;
1659 : else
1660 434270 : use_parallel_mode = queryDesc->plannedstmt->parallelModeNeeded;
1661 562496 : queryDesc->already_executed = true;
1662 :
1663 562496 : estate->es_use_parallel_mode = use_parallel_mode;
1664 562496 : if (use_parallel_mode)
1665 694 : EnterParallelMode();
1666 :
1667 : /*
1668 : * Loop until we've processed the proper number of tuples from the plan.
1669 : */
1670 : for (;;)
1671 : {
1672 : /* Reset the per-output-tuple exprcontext */
1673 12651796 : ResetPerTupleExprContext(estate);
1674 :
1675 : /*
1676 : * Execute the plan and obtain a tuple
1677 : */
1678 12651796 : slot = ExecProcNode(planstate);
1679 :
1680 : /*
1681 : * if the tuple is null, then we assume there is nothing more to
1682 : * process so we just end the loop...
1683 : */
1684 12628186 : if (TupIsNull(slot))
1685 : break;
1686 :
1687 : /*
1688 : * If we have a junk filter, then project a new tuple with the junk
1689 : * removed.
1690 : *
1691 : * Store this new "clean" tuple in the junkfilter's resultSlot.
1692 : * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
1693 : * because that tuple slot has the wrong descriptor.)
1694 : */
1695 12187506 : if (estate->es_junkFilter != NULL)
1696 268074 : slot = ExecFilterJunk(estate->es_junkFilter, slot);
1697 :
1698 : /*
1699 : * If we are supposed to send the tuple somewhere, do so. (In
1700 : * practice, this is probably always the case at this point.)
1701 : */
1702 12187506 : if (sendTuples)
1703 : {
1704 : /*
1705 : * If we are not able to send the tuple, we assume the destination
1706 : * has closed and no more tuples can be sent. If that's the case,
1707 : * end the loop.
1708 : */
1709 12187506 : if (!dest->receiveSlot(slot, dest))
1710 0 : break;
1711 : }
1712 :
1713 : /*
1714 : * Count tuples processed, if this is a SELECT. (For other operation
1715 : * types, the ModifyTable plan node must count the appropriate
1716 : * events.)
1717 : */
1718 12187506 : if (operation == CMD_SELECT)
1719 12180842 : (estate->es_processed)++;
1720 :
1721 : /*
 1722 :              * Check our tuple count. If we've processed the proper number then
 1723 :              * quit, else loop again and process more tuples. Zero numberTuples
1724 : * means no limit.
1725 : */
1726 12187506 : current_tuple_count++;
1727 12187506 : if (numberTuples && numberTuples == current_tuple_count)
1728 98206 : break;
1729 : }
1730 :
1731 : /*
1732 : * If we know we won't need to back up, we can release resources at this
1733 : * point.
1734 : */
1735 538886 : if (!(estate->es_top_eflags & EXEC_FLAG_BACKWARD))
1736 531456 : ExecShutdownNode(planstate);
1737 :
1738 538886 : if (use_parallel_mode)
1739 682 : ExitParallelMode();
1740 538886 : }
1741 :
1742 :
1743 : /*
1744 : * ExecRelCheck --- check that tuple meets check constraints for result relation
1745 : *
1746 : * Returns NULL if OK, else name of failed check constraint
1747 : */
1748 : static const char *
1749 2782 : ExecRelCheck(ResultRelInfo *resultRelInfo,
1750 : TupleTableSlot *slot, EState *estate)
1751 : {
1752 2782 : Relation rel = resultRelInfo->ri_RelationDesc;
1753 2782 : int ncheck = rel->rd_att->constr->num_check;
1754 2782 : ConstrCheck *check = rel->rd_att->constr->check;
1755 : ExprContext *econtext;
1756 : MemoryContext oldContext;
1757 :
1758 : /*
1759 : * CheckNNConstraintFetch let this pass with only a warning, but now we
1760 : * should fail rather than possibly failing to enforce an important
1761 : * constraint.
1762 : */
1763 2782 : if (ncheck != rel->rd_rel->relchecks)
1764 0 : elog(ERROR, "%d pg_constraint record(s) missing for relation \"%s\"",
1765 : rel->rd_rel->relchecks - ncheck, RelationGetRelationName(rel));
1766 :
1767 : /*
1768 : * If first time through for this result relation, build expression
1769 : * nodetrees for rel's constraint expressions. Keep them in the per-query
1770 : * memory context so they'll survive throughout the query.
1771 : */
1772 2782 : if (resultRelInfo->ri_CheckConstraintExprs == NULL)
1773 : {
1774 1330 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
1775 1330 : resultRelInfo->ri_CheckConstraintExprs = palloc0_array(ExprState *, ncheck);
1776 3340 : for (int i = 0; i < ncheck; i++)
1777 : {
1778 : Expr *checkconstr;
1779 :
1780 : /* Skip not enforced constraint */
1781 2016 : if (!check[i].ccenforced)
1782 204 : continue;
1783 :
1784 1812 : checkconstr = stringToNode(check[i].ccbin);
1785 1812 : checkconstr = (Expr *) expand_generated_columns_in_expr((Node *) checkconstr, rel, 1);
1786 1806 : resultRelInfo->ri_CheckConstraintExprs[i] =
1787 1812 : ExecPrepareExpr(checkconstr, estate);
1788 : }
1789 1324 : MemoryContextSwitchTo(oldContext);
1790 : }
1791 :
1792 : /*
1793 : * We will use the EState's per-tuple context for evaluating constraint
1794 : * expressions (creating it if it's not already there).
1795 : */
1796 2776 : econtext = GetPerTupleExprContext(estate);
1797 :
1798 : /* Arrange for econtext's scan tuple to be the tuple under test */
1799 2776 : econtext->ecxt_scantuple = slot;
1800 :
1801 : /* And evaluate the constraints */
1802 6228 : for (int i = 0; i < ncheck; i++)
1803 : {
1804 3900 : ExprState *checkconstr = resultRelInfo->ri_CheckConstraintExprs[i];
1805 :
1806 : /*
1807 : * NOTE: SQL specifies that a NULL result from a constraint expression
1808 : * is not to be treated as a failure. Therefore, use ExecCheck not
1809 : * ExecQual.
1810 : */
1811 3900 : if (checkconstr && !ExecCheck(checkconstr, econtext))
1812 448 : return check[i].ccname;
1813 : }
1814 :
1815 : /* NULL result means no error */
1816 2328 : return NULL;
1817 : }
1818 :
1819 : /*
1820 : * ExecPartitionCheck --- check that tuple meets the partition constraint.
1821 : *
1822 : * Returns true if it meets the partition constraint. If the constraint
1823 : * fails and we're asked to emit an error, do so and don't return; otherwise
1824 : * return false.
1825 : */
1826 : bool
1827 13372 : ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
1828 : EState *estate, bool emitError)
1829 : {
1830 : ExprContext *econtext;
1831 : bool success;
1832 :
1833 : /*
1834 : * If first time through, build expression state tree for the partition
1835 : * check expression. (In the corner case where the partition check
1836 : * expression is empty, ie there's a default partition and nothing else,
1837 : * we'll be fooled into executing this code each time through. But it's
1838 : * pretty darn cheap in that case, so we don't worry about it.)
1839 : */
1840 13372 : if (resultRelInfo->ri_PartitionCheckExpr == NULL)
1841 : {
1842 : /*
1843 : * Ensure that the qual tree and prepared expression are in the
1844 : * query-lifespan context.
1845 : */
1846 3510 : MemoryContext oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1847 3510 : List *qual = RelationGetPartitionQual(resultRelInfo->ri_RelationDesc);
1848 :
1849 3510 : resultRelInfo->ri_PartitionCheckExpr = ExecPrepareCheck(qual, estate);
1850 3510 : MemoryContextSwitchTo(oldcxt);
1851 : }
1852 :
1853 : /*
1854 : * We will use the EState's per-tuple context for evaluating constraint
1855 : * expressions (creating it if it's not already there).
1856 : */
1857 13372 : econtext = GetPerTupleExprContext(estate);
1858 :
1859 : /* Arrange for econtext's scan tuple to be the tuple under test */
1860 13372 : econtext->ecxt_scantuple = slot;
1861 :
1862 : /*
 1863 :      * As in the case of the cataloged constraints, we treat a NULL result as
1864 : * success here, not a failure.
1865 : */
1866 13372 : success = ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext);
1867 :
1868 : /* if asked to emit error, don't actually return on failure */
1869 13372 : if (!success && emitError)
1870 202 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1871 :
1872 13170 : return success;
1873 : }
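/*
 * A hedged sketch, not from this file: callers use emitError = true when the
 * constraint must simply hold, and emitError = false when failure is an
 * expected outcome they handle themselves -- e.g. an UPDATE noticing that
 * the modified row no longer fits its partition and must be moved, as in
 * nodeModifyTable.c.  The helper name below is hypothetical.
 */
static bool
TupleStillFitsPartition(ResultRelInfo *rri, TupleTableSlot *slot,
                        EState *estate)
{
    /* false means the caller should re-route the row to another partition */
    return ExecPartitionCheck(rri, slot, estate, false);
}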
1874 :
1875 : /*
1876 : * ExecPartitionCheckEmitError - Form and emit an error message after a failed
1877 : * partition constraint check.
1878 : */
1879 : void
1880 250 : ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo,
1881 : TupleTableSlot *slot,
1882 : EState *estate)
1883 : {
1884 : Oid root_relid;
1885 : TupleDesc tupdesc;
1886 : char *val_desc;
1887 : Bitmapset *modifiedCols;
1888 :
1889 : /*
1890 : * If the tuple has been routed, it's been converted to the partition's
1891 : * rowtype, which might differ from the root table's. We must convert it
1892 : * back to the root table's rowtype so that val_desc in the error message
1893 : * matches the input tuple.
1894 : */
1895 250 : if (resultRelInfo->ri_RootResultRelInfo)
1896 : {
1897 20 : ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
1898 : TupleDesc old_tupdesc;
1899 : AttrMap *map;
1900 :
1901 20 : root_relid = RelationGetRelid(rootrel->ri_RelationDesc);
1902 20 : tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
1903 :
1904 20 : old_tupdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
1905 : /* a reverse map */
1906 20 : map = build_attrmap_by_name_if_req(old_tupdesc, tupdesc, false);
1907 :
1908 : /*
1909 : * Partition-specific slot's tupdesc can't be changed, so allocate a
1910 : * new one.
1911 : */
1912 20 : if (map != NULL)
1913 8 : slot = execute_attr_map_slot(map, slot,
1914 : MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
1915 20 : modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
1916 20 : ExecGetUpdatedCols(rootrel, estate));
1917 : }
1918 : else
1919 : {
1920 230 : root_relid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1921 230 : tupdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
1922 230 : modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
1923 230 : ExecGetUpdatedCols(resultRelInfo, estate));
1924 : }
1925 :
1926 250 : val_desc = ExecBuildSlotValueDescription(root_relid,
1927 : slot,
1928 : tupdesc,
1929 : modifiedCols,
1930 : 64);
1931 250 : ereport(ERROR,
1932 : (errcode(ERRCODE_CHECK_VIOLATION),
1933 : errmsg("new row for relation \"%s\" violates partition constraint",
1934 : RelationGetRelationName(resultRelInfo->ri_RelationDesc)),
1935 : val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
1936 : errtable(resultRelInfo->ri_RelationDesc)));
1937 : }
1938 :
1939 : /*
1940 : * ExecConstraints - check constraints of the tuple in 'slot'
1941 : *
1942 : * This checks the traditional NOT NULL and check constraints.
1943 : *
1944 : * The partition constraint is *NOT* checked.
1945 : *
1946 : * Note: 'slot' contains the tuple to check the constraints of, which may
1947 : * have been converted from the original input tuple after tuple routing.
1948 : * 'resultRelInfo' is the final result relation, after tuple routing.
1949 : */
1950 : void
1951 4532164 : ExecConstraints(ResultRelInfo *resultRelInfo,
1952 : TupleTableSlot *slot, EState *estate)
1953 : {
1954 4532164 : Relation rel = resultRelInfo->ri_RelationDesc;
1955 4532164 : TupleDesc tupdesc = RelationGetDescr(rel);
1956 4532164 : TupleConstr *constr = tupdesc->constr;
1957 : Bitmapset *modifiedCols;
1958 4532164 : List *notnull_virtual_attrs = NIL;
1959 :
1960 : Assert(constr); /* we should not be called otherwise */
1961 :
1962 : /*
1963 : * Verify not-null constraints.
1964 : *
1965 : * Not-null constraints on virtual generated columns are collected and
1966 : * checked separately below.
1967 : */
1968 4532164 : if (constr->has_not_null)
1969 : {
1970 16791604 : for (AttrNumber attnum = 1; attnum <= tupdesc->natts; attnum++)
1971 : {
1972 12265628 : Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
1973 :
1974 12265628 : if (att->attnotnull && att->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL)
1975 90 : notnull_virtual_attrs = lappend_int(notnull_virtual_attrs, attnum);
1976 12265538 : else if (att->attnotnull && slot_attisnull(slot, attnum))
1977 314 : ReportNotNullViolationError(resultRelInfo, slot, estate, attnum);
1978 : }
1979 : }
1980 :
1981 : /*
 1982 :      * Verify not-null constraints on virtual generated columns, if any.
1983 : */
1984 4531850 : if (notnull_virtual_attrs)
1985 : {
1986 : AttrNumber attnum;
1987 :
1988 90 : attnum = ExecRelGenVirtualNotNull(resultRelInfo, slot, estate,
1989 : notnull_virtual_attrs);
1990 90 : if (attnum != InvalidAttrNumber)
1991 42 : ReportNotNullViolationError(resultRelInfo, slot, estate, attnum);
1992 : }
1993 :
1994 : /*
1995 : * Verify check constraints.
1996 : */
1997 4531808 : if (rel->rd_rel->relchecks > 0)
1998 : {
1999 : const char *failed;
2000 :
2001 2782 : if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
2002 : {
2003 : char *val_desc;
2004 448 : Relation orig_rel = rel;
2005 :
2006 : /*
2007 : * If the tuple has been routed, it's been converted to the
2008 : * partition's rowtype, which might differ from the root table's.
 2009 :              * We must convert it back to the root table's rowtype so that
 2010 :              * the val_desc shown in the error message matches the input tuple.
2011 : */
2012 448 : if (resultRelInfo->ri_RootResultRelInfo)
2013 : {
2014 90 : ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
2015 90 : TupleDesc old_tupdesc = RelationGetDescr(rel);
2016 : AttrMap *map;
2017 :
2018 90 : tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
2019 : /* a reverse map */
2020 90 : map = build_attrmap_by_name_if_req(old_tupdesc,
2021 : tupdesc,
2022 : false);
2023 :
2024 : /*
2025 : * Partition-specific slot's tupdesc can't be changed, so
2026 : * allocate a new one.
2027 : */
2028 90 : if (map != NULL)
2029 60 : slot = execute_attr_map_slot(map, slot,
2030 : MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
2031 90 : modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
2032 90 : ExecGetUpdatedCols(rootrel, estate));
2033 90 : rel = rootrel->ri_RelationDesc;
2034 : }
2035 : else
2036 358 : modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
2037 358 : ExecGetUpdatedCols(resultRelInfo, estate));
2038 448 : val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
2039 : slot,
2040 : tupdesc,
2041 : modifiedCols,
2042 : 64);
2043 448 : ereport(ERROR,
2044 : (errcode(ERRCODE_CHECK_VIOLATION),
2045 : errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
2046 : RelationGetRelationName(orig_rel), failed),
2047 : val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
2048 : errtableconstraint(orig_rel, failed)));
2049 : }
2050 : }
2051 4531354 : }
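/*
 * A hedged sketch of the expected calling pattern, modeled on -- but not
 * copied from -- the INSERT path in nodeModifyTable.c: ExecConstraints is
 * invoked only when the tuple descriptor has any constraints at all, and
 * the partition constraint is checked separately afterwards.
 * CheckTupleBeforeStore is a hypothetical name.
 */
static void
CheckTupleBeforeStore(ResultRelInfo *rri, TupleTableSlot *slot, EState *estate)
{
    Relation    rel = rri->ri_RelationDesc;

    /* NOT NULL and CHECK constraints, if there are any */
    if (RelationGetDescr(rel)->constr != NULL)
        ExecConstraints(rri, slot, estate);

    /* then the partition constraint, erroring out on failure */
    if (rel->rd_rel->relispartition)
        ExecPartitionCheck(rri, slot, estate, true);
}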
2052 :
2053 : /*
2054 : * Verify not-null constraints on virtual generated columns of the given
2055 : * tuple slot.
2056 : *
2057 : * Return value of InvalidAttrNumber means all not-null constraints on virtual
2058 : * generated columns are satisfied. A return value > 0 means a not-null
2059 : * violation happened for that attribute.
2060 : *
 2061 :  * notnull_virtual_attrs is the list of attnums of virtual generated
 2062 :  * columns with not-null constraints.
2063 : */
2064 : AttrNumber
2065 174 : ExecRelGenVirtualNotNull(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
2066 : EState *estate, List *notnull_virtual_attrs)
2067 : {
2068 174 : Relation rel = resultRelInfo->ri_RelationDesc;
2069 : ExprContext *econtext;
2070 : MemoryContext oldContext;
2071 :
2072 : /*
2073 : * We implement this by building a NullTest node for each virtual
2074 : * generated column, which we cache in resultRelInfo, and running those
2075 : * through ExecCheck().
2076 : */
2077 174 : if (resultRelInfo->ri_GenVirtualNotNullConstraintExprs == NULL)
2078 : {
2079 126 : oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
2080 126 : resultRelInfo->ri_GenVirtualNotNullConstraintExprs =
2081 126 : palloc0_array(ExprState *, list_length(notnull_virtual_attrs));
2082 :
2083 408 : foreach_int(attnum, notnull_virtual_attrs)
2084 : {
2085 156 : int i = foreach_current_index(attnum);
2086 : NullTest *nnulltest;
2087 :
2088 : /* "generated_expression IS NOT NULL" check. */
2089 156 : nnulltest = makeNode(NullTest);
2090 156 : nnulltest->arg = (Expr *) build_generation_expression(rel, attnum);
2091 156 : nnulltest->nulltesttype = IS_NOT_NULL;
2092 156 : nnulltest->argisrow = false;
2093 156 : nnulltest->location = -1;
2094 :
2095 156 : resultRelInfo->ri_GenVirtualNotNullConstraintExprs[i] =
2096 156 : ExecPrepareExpr((Expr *) nnulltest, estate);
2097 : }
2098 126 : MemoryContextSwitchTo(oldContext);
2099 : }
2100 :
2101 : /*
 2102 :      * We will use the EState's per-tuple context for evaluating the not-null
 2103 :      * constraint expressions on virtual generated columns (creating it if it's
2104 : * not already there).
2105 : */
2106 174 : econtext = GetPerTupleExprContext(estate);
2107 :
2108 : /* Arrange for econtext's scan tuple to be the tuple under test */
2109 174 : econtext->ecxt_scantuple = slot;
2110 :
 2111 :     /* And evaluate the check constraints for virtual generated columns */
2112 432 : foreach_int(attnum, notnull_virtual_attrs)
2113 : {
2114 228 : int i = foreach_current_index(attnum);
2115 228 : ExprState *exprstate = resultRelInfo->ri_GenVirtualNotNullConstraintExprs[i];
2116 :
2117 : Assert(exprstate != NULL);
2118 228 : if (!ExecCheck(exprstate, econtext))
2119 72 : return attnum;
2120 : }
2121 :
2122 : /* InvalidAttrNumber result means no error */
2123 102 : return InvalidAttrNumber;
2124 : }
2125 :
2126 : /*
2127 : * Report a violation of a not-null constraint that was already detected.
2128 : */
2129 : static void
2130 356 : ReportNotNullViolationError(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
2131 : EState *estate, int attnum)
2132 : {
2133 : Bitmapset *modifiedCols;
2134 : char *val_desc;
2135 356 : Relation rel = resultRelInfo->ri_RelationDesc;
2136 356 : Relation orig_rel = rel;
2137 356 : TupleDesc tupdesc = RelationGetDescr(rel);
2138 356 : TupleDesc orig_tupdesc = RelationGetDescr(rel);
2139 356 : Form_pg_attribute att = TupleDescAttr(tupdesc, attnum - 1);
2140 :
2141 : Assert(attnum > 0);
2142 :
2143 : /*
2144 : * If the tuple has been routed, it's been converted to the partition's
2145 : * rowtype, which might differ from the root table's. We must convert it
 2146 :  * back to the root table's rowtype so that the val_desc shown in the
 2147 :  * error message matches the input tuple.
2148 : */
2149 356 : if (resultRelInfo->ri_RootResultRelInfo)
2150 : {
2151 72 : ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
2152 : AttrMap *map;
2153 :
2154 72 : tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
2155 : /* a reverse map */
2156 72 : map = build_attrmap_by_name_if_req(orig_tupdesc,
2157 : tupdesc,
2158 : false);
2159 :
2160 : /*
2161 : * Partition-specific slot's tupdesc can't be changed, so allocate a
2162 : * new one.
2163 : */
2164 72 : if (map != NULL)
2165 42 : slot = execute_attr_map_slot(map, slot,
2166 : MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
2167 72 : modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
2168 72 : ExecGetUpdatedCols(rootrel, estate));
2169 72 : rel = rootrel->ri_RelationDesc;
2170 : }
2171 : else
2172 284 : modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
2173 284 : ExecGetUpdatedCols(resultRelInfo, estate));
2174 :
2175 356 : val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
2176 : slot,
2177 : tupdesc,
2178 : modifiedCols,
2179 : 64);
2180 356 : ereport(ERROR,
2181 : errcode(ERRCODE_NOT_NULL_VIOLATION),
2182 : errmsg("null value in column \"%s\" of relation \"%s\" violates not-null constraint",
2183 : NameStr(att->attname),
2184 : RelationGetRelationName(orig_rel)),
2185 : val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
2186 : errtablecol(orig_rel, attnum));
2187 : }
2188 :
2189 : /*
2190 : * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
2191 : * of the specified kind.
2192 : *
2193 : * Note that this needs to be called multiple times to ensure that all kinds of
2194 : * WITH CHECK OPTIONs are handled (both those from views which have the WITH
2195 : * CHECK OPTION set and from row-level security policies). See ExecInsert()
2196 : * and ExecUpdate().
2197 : */
2198 : void
2199 2102 : ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
2200 : TupleTableSlot *slot, EState *estate)
2201 : {
2202 2102 : Relation rel = resultRelInfo->ri_RelationDesc;
2203 2102 : TupleDesc tupdesc = RelationGetDescr(rel);
2204 : ExprContext *econtext;
2205 : ListCell *l1,
2206 : *l2;
2207 :
2208 : /*
2209 : * We will use the EState's per-tuple context for evaluating constraint
2210 : * expressions (creating it if it's not already there).
2211 : */
2212 2102 : econtext = GetPerTupleExprContext(estate);
2213 :
2214 : /* Arrange for econtext's scan tuple to be the tuple under test */
2215 2102 : econtext->ecxt_scantuple = slot;
2216 :
2217 : /* Check each of the constraints */
2218 5062 : forboth(l1, resultRelInfo->ri_WithCheckOptions,
2219 : l2, resultRelInfo->ri_WithCheckOptionExprs)
2220 : {
2221 3482 : WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
2222 3482 : ExprState *wcoExpr = (ExprState *) lfirst(l2);
2223 :
2224 : /*
2225 : * Skip any WCOs which are not the kind we are looking for at this
2226 : * time.
2227 : */
2228 3482 : if (wco->kind != kind)
2229 1960 : continue;
2230 :
2231 : /*
2232 : * WITH CHECK OPTION checks are intended to ensure that the new tuple
2233 : * is visible (in the case of a view) or that it passes the
2234 : * 'with-check' policy (in the case of row security). If the qual
2235 : * evaluates to NULL or FALSE, then the new tuple won't be included in
2236 : * the view or doesn't pass the 'with-check' policy for the table.
2237 : */
2238 1522 : if (!ExecQual(wcoExpr, econtext))
2239 : {
2240 : char *val_desc;
2241 : Bitmapset *modifiedCols;
2242 :
2243 522 : switch (wco->kind)
2244 : {
2245 : /*
2246 : * For WITH CHECK OPTIONs coming from views, we might be
2247 : * able to provide the details on the row, depending on
2248 : * the permissions on the relation (that is, if the user
2249 : * could view it directly anyway). For RLS violations, we
2250 : * don't include the data since we don't know if the user
2251 : * should be able to view the tuple as that depends on the
2252 : * USING policy.
2253 : */
2254 228 : case WCO_VIEW_CHECK:
2255 : /* See the comment in ExecConstraints(). */
2256 228 : if (resultRelInfo->ri_RootResultRelInfo)
2257 : {
2258 42 : ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
2259 42 : TupleDesc old_tupdesc = RelationGetDescr(rel);
2260 : AttrMap *map;
2261 :
2262 42 : tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
2263 : /* a reverse map */
2264 42 : map = build_attrmap_by_name_if_req(old_tupdesc,
2265 : tupdesc,
2266 : false);
2267 :
2268 : /*
2269 : * Partition-specific slot's tupdesc can't be changed,
2270 : * so allocate a new one.
2271 : */
2272 42 : if (map != NULL)
2273 24 : slot = execute_attr_map_slot(map, slot,
2274 : MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
2275 :
2276 42 : modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
2277 42 : ExecGetUpdatedCols(rootrel, estate));
2278 42 : rel = rootrel->ri_RelationDesc;
2279 : }
2280 : else
2281 186 : modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
2282 186 : ExecGetUpdatedCols(resultRelInfo, estate));
2283 228 : val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
2284 : slot,
2285 : tupdesc,
2286 : modifiedCols,
2287 : 64);
2288 :
2289 228 : ereport(ERROR,
2290 : (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
2291 : errmsg("new row violates check option for view \"%s\"",
2292 : wco->relname),
2293 : val_desc ? errdetail("Failing row contains %s.",
2294 : val_desc) : 0));
2295 : break;
2296 246 : case WCO_RLS_INSERT_CHECK:
2297 : case WCO_RLS_UPDATE_CHECK:
2298 246 : if (wco->polname != NULL)
2299 60 : ereport(ERROR,
2300 : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2301 : errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
2302 : wco->polname, wco->relname)));
2303 : else
2304 186 : ereport(ERROR,
2305 : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2306 : errmsg("new row violates row-level security policy for table \"%s\"",
2307 : wco->relname)));
2308 : break;
2309 24 : case WCO_RLS_MERGE_UPDATE_CHECK:
2310 : case WCO_RLS_MERGE_DELETE_CHECK:
2311 24 : if (wco->polname != NULL)
2312 0 : ereport(ERROR,
2313 : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2314 : errmsg("target row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
2315 : wco->polname, wco->relname)));
2316 : else
2317 24 : ereport(ERROR,
2318 : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2319 : errmsg("target row violates row-level security policy (USING expression) for table \"%s\"",
2320 : wco->relname)));
2321 : break;
2322 24 : case WCO_RLS_CONFLICT_CHECK:
2323 24 : if (wco->polname != NULL)
2324 0 : ereport(ERROR,
2325 : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2326 : errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
2327 : wco->polname, wco->relname)));
2328 : else
2329 24 : ereport(ERROR,
2330 : (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
2331 : errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
2332 : wco->relname)));
2333 : break;
2334 0 : default:
2335 0 : elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
2336 : break;
2337 : }
2338 : }
2339 : }
2340 1580 : }
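/*
 * A hedged illustration of the "called multiple times" note above,
 * condensed from -- not copied from -- the INSERT flow in nodeModifyTable.c:
 * RLS policies are checked before the tuple is stored, view WITH CHECK
 * OPTIONs only afterwards, since the SQL spec requires them to be tested
 * after all constraint and uniqueness checks.  The function name is
 * hypothetical.
 */
static void
InsertWithChecks(ResultRelInfo *rri, TupleTableSlot *slot, EState *estate)
{
    if (rri->ri_WithCheckOptions != NIL)
        ExecWithCheckOptions(WCO_RLS_INSERT_CHECK, rri, slot, estate);

    /* ... store the tuple, update indexes, fire triggers ... */

    if (rri->ri_WithCheckOptions != NIL)
        ExecWithCheckOptions(WCO_VIEW_CHECK, rri, slot, estate);
}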
2341 :
2342 : /*
2343 : * ExecBuildSlotValueDescription -- construct a string representing a tuple
2344 : *
2345 : * This is intentionally very similar to BuildIndexValueDescription, but
2346 : * unlike that function, we truncate long field values (to at most maxfieldlen
2347 : * bytes). That seems necessary here since heap field values could be very
2348 : * long, whereas index entries typically aren't so wide.
2349 : *
2350 : * Also, unlike the case with index entries, we need to be prepared to ignore
2351 : * dropped columns. We used to use the slot's tuple descriptor to decode the
2352 : * data, but the slot's descriptor doesn't identify dropped columns, so we
2353 : * now need to be passed the relation's descriptor.
2354 : *
2355 : * Note that, like BuildIndexValueDescription, if the user does not have
2356 : * permission to view any of the columns involved, a NULL is returned. Unlike
2357 : * BuildIndexValueDescription, if the user has access to view a subset of the
 2358 :  * columns involved, that subset will be returned with a key identifying which
2359 : * columns they are.
2360 : */
2361 : char *
2362 1440 : ExecBuildSlotValueDescription(Oid reloid,
2363 : TupleTableSlot *slot,
2364 : TupleDesc tupdesc,
2365 : Bitmapset *modifiedCols,
2366 : int maxfieldlen)
2367 : {
2368 : StringInfoData buf;
2369 : StringInfoData collist;
2370 1440 : bool write_comma = false;
2371 1440 : bool write_comma_collist = false;
2372 : int i;
2373 : AclResult aclresult;
2374 1440 : bool table_perm = false;
2375 1440 : bool any_perm = false;
2376 :
2377 : /*
2378 : * Check if RLS is enabled and should be active for the relation; if so,
2379 : * then don't return anything. Otherwise, go through normal permission
2380 : * checks.
2381 : */
2382 1440 : if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED)
2383 0 : return NULL;
2384 :
2385 1440 : initStringInfo(&buf);
2386 :
2387 1440 : appendStringInfoChar(&buf, '(');
2388 :
2389 : /*
2390 : * Check if the user has permissions to see the row. Table-level SELECT
2391 : * allows access to all columns. If the user does not have table-level
2392 : * SELECT then we check each column and include those the user has SELECT
2393 : * rights on. Additionally, we always include columns the user provided
2394 : * data for.
2395 : */
2396 1440 : aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
2397 1440 : if (aclresult != ACLCHECK_OK)
2398 : {
2399 : /* Set up the buffer for the column list */
2400 60 : initStringInfo(&collist);
2401 60 : appendStringInfoChar(&collist, '(');
2402 : }
2403 : else
2404 1380 : table_perm = any_perm = true;
2405 :
2406 : /* Make sure the tuple is fully deconstructed */
2407 1440 : slot_getallattrs(slot);
2408 :
2409 5166 : for (i = 0; i < tupdesc->natts; i++)
2410 : {
2411 3726 : bool column_perm = false;
2412 : char *val;
2413 : int vallen;
2414 3726 : Form_pg_attribute att = TupleDescAttr(tupdesc, i);
2415 :
2416 : /* ignore dropped columns */
2417 3726 : if (att->attisdropped)
2418 38 : continue;
2419 :
2420 3688 : if (!table_perm)
2421 : {
2422 : /*
2423 : * No table-level SELECT, so need to make sure they either have
2424 : * SELECT rights on the column or that they have provided the data
2425 : * for the column. If not, omit this column from the error
2426 : * message.
2427 : */
2428 234 : aclresult = pg_attribute_aclcheck(reloid, att->attnum,
2429 : GetUserId(), ACL_SELECT);
2430 234 : if (bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
2431 138 : modifiedCols) || aclresult == ACLCHECK_OK)
2432 : {
2433 144 : column_perm = any_perm = true;
2434 :
2435 144 : if (write_comma_collist)
2436 84 : appendStringInfoString(&collist, ", ");
2437 : else
2438 60 : write_comma_collist = true;
2439 :
2440 144 : appendStringInfoString(&collist, NameStr(att->attname));
2441 : }
2442 : }
2443 :
2444 3688 : if (table_perm || column_perm)
2445 : {
2446 3598 : if (att->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL)
2447 54 : val = "virtual";
2448 3544 : else if (slot->tts_isnull[i])
2449 634 : val = "null";
2450 : else
2451 : {
2452 : Oid foutoid;
2453 : bool typisvarlena;
2454 :
2455 2910 : getTypeOutputInfo(att->atttypid,
2456 : &foutoid, &typisvarlena);
2457 2910 : val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
2458 : }
2459 :
2460 3598 : if (write_comma)
2461 2158 : appendStringInfoString(&buf, ", ");
2462 : else
2463 1440 : write_comma = true;
2464 :
2465 : /* truncate if needed */
2466 3598 : vallen = strlen(val);
2467 3598 : if (vallen <= maxfieldlen)
2468 3596 : appendBinaryStringInfo(&buf, val, vallen);
2469 : else
2470 : {
2471 2 : vallen = pg_mbcliplen(val, vallen, maxfieldlen);
2472 2 : appendBinaryStringInfo(&buf, val, vallen);
2473 2 : appendStringInfoString(&buf, "...");
2474 : }
2475 : }
2476 : }
2477 :
2478 : /* If we end up with zero columns being returned, then return NULL. */
2479 1440 : if (!any_perm)
2480 0 : return NULL;
2481 :
2482 1440 : appendStringInfoChar(&buf, ')');
2483 :
2484 1440 : if (!table_perm)
2485 : {
2486 60 : appendStringInfoString(&collist, ") = ");
2487 60 : appendBinaryStringInfo(&collist, buf.data, buf.len);
2488 :
2489 60 : return collist.data;
2490 : }
2491 :
2492 1380 : return buf.data;
2493 : }
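/*
 * A hedged illustration of the two output shapes produced above; the column
 * names and values are made up.  With table-level SELECT the whole row is
 * printed:
 *
 *     (42, null, a very long text val...)
 *
 * Without it, only the visible or user-supplied columns appear, prefixed by
 * a key naming them:
 *
 *     (b, c) = (null, a very long text val...)
 *
 * Values longer than maxfieldlen bytes are clipped at a multibyte character
 * boundary and terminated with "...".
 */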
2494 :
2495 :
2496 : /*
2497 : * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
2498 : * given ResultRelInfo
2499 : */
2500 : LockTupleMode
2501 7836 : ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
2502 : {
2503 : Bitmapset *keyCols;
2504 : Bitmapset *updatedCols;
2505 :
2506 : /*
2507 : * Compute lock mode to use. If columns that are part of the key have not
2508 : * been modified, then we can use a weaker lock, allowing for better
2509 : * concurrency.
2510 : */
2511 7836 : updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
2512 7836 : keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
2513 : INDEX_ATTR_BITMAP_KEY);
2514 :
2515 7836 : if (bms_overlap(keyCols, updatedCols))
2516 264 : return LockTupleExclusive;
2517 :
2518 7572 : return LockTupleNoKeyExclusive;
2519 : }
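/*
 * A hedged worked example, assuming a hypothetical table
 * t(a int primary key, b text):
 *
 *     UPDATE t SET b = ...   updatedCols = {b}, keyCols = {a}: no overlap,
 *                            so LockTupleNoKeyExclusive; concurrent
 *                            key-share lockers such as foreign-key checks
 *                            are not blocked.
 *     UPDATE t SET a = ...   keyCols overlap, so LockTupleExclusive.
 */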
2520 :
2521 : /*
2522 : * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
2523 : *
2524 : * If no such struct, either return NULL or throw error depending on missing_ok
2525 : */
2526 : ExecRowMark *
2527 10914 : ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
2528 : {
2529 10914 : if (rti > 0 && rti <= estate->es_range_table_size &&
2530 10914 : estate->es_rowmarks != NULL)
2531 : {
2532 10914 : ExecRowMark *erm = estate->es_rowmarks[rti - 1];
2533 :
2534 10914 : if (erm)
2535 10914 : return erm;
2536 : }
2537 0 : if (!missing_ok)
2538 0 : elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
2539 0 : return NULL;
2540 : }
2541 :
2542 : /*
2543 : * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
2544 : *
2545 : * Inputs are the underlying ExecRowMark struct and the targetlist of the
2546 : * input plan node (not planstate node!). We need the latter to find out
2547 : * the column numbers of the resjunk columns.
2548 : */
2549 : ExecAuxRowMark *
2550 10914 : ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
2551 : {
2552 10914 : ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
2553 : char resname[32];
2554 :
2555 10914 : aerm->rowmark = erm;
2556 :
2557 : /* Look up the resjunk columns associated with this rowmark */
2558 10914 : if (erm->markType != ROW_MARK_COPY)
2559 : {
2560 : /* need ctid for all methods other than COPY */
2561 10630 : snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
2562 10630 : aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2563 : resname);
2564 10630 : if (!AttributeNumberIsValid(aerm->ctidAttNo))
2565 0 : elog(ERROR, "could not find junk %s column", resname);
2566 : }
2567 : else
2568 : {
2569 : /* need wholerow if COPY */
2570 284 : snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
2571 284 : aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
2572 : resname);
2573 284 : if (!AttributeNumberIsValid(aerm->wholeAttNo))
2574 0 : elog(ERROR, "could not find junk %s column", resname);
2575 : }
2576 :
2577 : /* if child rel, need tableoid */
2578 10914 : if (erm->rti != erm->prti)
2579 : {
2580 1966 : snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
2581 1966 : aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2582 : resname);
2583 1966 : if (!AttributeNumberIsValid(aerm->toidAttNo))
2584 0 : elog(ERROR, "could not find junk %s column", resname);
2585 : }
2586 :
2587 10914 : return aerm;
2588 : }
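/*
 * A hedged illustration: for a rowmark with rowmarkId = 1, the planner
 * emits resjunk tlist entries named "ctid1" -- plus "tableoid1" if the rel
 * is a child -- or "wholerow1" for ROW_MARK_COPY.  The attribute numbers
 * saved above are later fed to ExecGetJunkAttribute(), as in
 * EvalPlanQualFetchRowMark() below.
 */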
2589 :
2590 :
2591 : /*
2592 : * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
2593 : * process the updated version under READ COMMITTED rules.
2594 : *
2595 : * See backend/executor/README for some info about how this works.
2596 : */
2597 :
2598 :
2599 : /*
2600 : * Check the updated version of a tuple to see if we want to process it under
2601 : * READ COMMITTED rules.
2602 : *
2603 : * epqstate - state for EvalPlanQual rechecking
2604 : * relation - table containing tuple
2605 : * rti - rangetable index of table containing tuple
2606 : * inputslot - tuple for processing - this can be the slot from
2607 : * EvalPlanQualSlot() for this rel, for increased efficiency.
2608 : *
2609 : * This tests whether the tuple in inputslot still matches the relevant
2610 : * quals. For that result to be useful, typically the input tuple has to be
 2611 :  * the last row version (otherwise the result isn't particularly useful) and
2612 : * locked (otherwise the result might be out of date). That's typically
2613 : * achieved by using table_tuple_lock() with the
2614 : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION flag.
2615 : *
2616 : * Returns a slot containing the new candidate update/delete tuple, or
2617 : * NULL if we determine we shouldn't process the row.
2618 : */
2619 : TupleTableSlot *
2620 252 : EvalPlanQual(EPQState *epqstate, Relation relation,
2621 : Index rti, TupleTableSlot *inputslot)
2622 : {
2623 : TupleTableSlot *slot;
2624 : TupleTableSlot *testslot;
2625 :
2626 : Assert(rti > 0);
2627 :
2628 : /*
2629 : * Need to run a recheck subquery. Initialize or reinitialize EPQ state.
2630 : */
2631 252 : EvalPlanQualBegin(epqstate);
2632 :
2633 : /*
2634 : * Callers will often use the EvalPlanQualSlot to store the tuple to avoid
2635 : * an unnecessary copy.
2636 : */
2637 252 : testslot = EvalPlanQualSlot(epqstate, relation, rti);
2638 252 : if (testslot != inputslot)
2639 12 : ExecCopySlot(testslot, inputslot);
2640 :
2641 : /*
2642 : * Mark that an EPQ tuple is available for this relation. (If there is
2643 : * more than one result relation, the others remain marked as having no
2644 : * tuple available.)
2645 : */
2646 252 : epqstate->relsubs_done[rti - 1] = false;
2647 252 : epqstate->relsubs_blocked[rti - 1] = false;
2648 :
2649 : /*
2650 : * Run the EPQ query. We assume it will return at most one tuple.
2651 : */
2652 252 : slot = EvalPlanQualNext(epqstate);
2653 :
2654 : /*
2655 : * If we got a tuple, force the slot to materialize the tuple so that it
2656 : * is not dependent on any local state in the EPQ query (in particular,
2657 : * it's highly likely that the slot contains references to any pass-by-ref
2658 : * datums that may be present in copyTuple). As with the next step, this
2659 : * is to guard against early re-use of the EPQ query.
2660 : */
2661 252 : if (!TupIsNull(slot))
2662 184 : ExecMaterializeSlot(slot);
2663 :
2664 : /*
2665 : * Clear out the test tuple, and mark that no tuple is available here.
2666 : * This is needed in case the EPQ state is re-used to test a tuple for a
2667 : * different target relation.
2668 : */
2669 252 : ExecClearTuple(testslot);
2670 252 : epqstate->relsubs_blocked[rti - 1] = true;
2671 :
2672 252 : return slot;
2673 : }
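/*
 * A hedged sketch of the typical caller flow described above, condensed
 * from nodeModifyTable.c with all error handling omitted; the helper name
 * is hypothetical.
 */
static TupleTableSlot *
LockLatestAndRecheck(EPQState *epqstate, Relation rel, Index rti,
                     ItemPointer tid, EState *estate, LockTupleMode lockmode)
{
    TupleTableSlot *inputslot = EvalPlanQualSlot(epqstate, rel, rti);
    TM_FailureData tmfd;

    /* lock the latest row version, fetching it into the EPQ slot */
    if (table_tuple_lock(rel, tid, estate->es_snapshot, inputslot,
                         estate->es_output_cid, lockmode, LockWaitBlock,
                         TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
                         &tmfd) != TM_Ok)
        return NULL;            /* sketch only; real callers branch here */

    /* NULL result means the new row version no longer passes the quals */
    return EvalPlanQual(epqstate, rel, rti, inputslot);
}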
2674 :
2675 : /*
2676 : * EvalPlanQualInit -- initialize during creation of a plan state node
2677 : * that might need to invoke EPQ processing.
2678 : *
2679 : * If the caller intends to use EvalPlanQual(), resultRelations should be
2680 : * a list of RT indexes of potential target relations for EvalPlanQual(),
2681 : * and we will arrange that the other listed relations don't return any
2682 : * tuple during an EvalPlanQual() call. Otherwise resultRelations
2683 : * should be NIL.
2684 : *
2685 : * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
2686 : * with EvalPlanQualSetPlan.
2687 : */
2688 : void
2689 268602 : EvalPlanQualInit(EPQState *epqstate, EState *parentestate,
2690 : Plan *subplan, List *auxrowmarks,
2691 : int epqParam, List *resultRelations)
2692 : {
2693 268602 : Index rtsize = parentestate->es_range_table_size;
2694 :
2695 : /* initialize data not changing over EPQState's lifetime */
2696 268602 : epqstate->parentestate = parentestate;
2697 268602 : epqstate->epqParam = epqParam;
2698 268602 : epqstate->resultRelations = resultRelations;
2699 :
2700 : /*
2701 : * Allocate space to reference a slot for each potential rti - do so now
2702 : * rather than in EvalPlanQualBegin(), as done for other dynamically
2703 : * allocated resources, so EvalPlanQualSlot() can be used to hold tuples
2704 : * that *may* need EPQ later, without forcing the overhead of
2705 : * EvalPlanQualBegin().
2706 : */
2707 268602 : epqstate->tuple_table = NIL;
2708 268602 : epqstate->relsubs_slot = (TupleTableSlot **)
2709 268602 : palloc0(rtsize * sizeof(TupleTableSlot *));
2710 :
2711 : /* ... and remember data that EvalPlanQualBegin will need */
2712 268602 : epqstate->plan = subplan;
2713 268602 : epqstate->arowMarks = auxrowmarks;
2714 :
2715 : /* ... and mark the EPQ state inactive */
2716 268602 : epqstate->origslot = NULL;
2717 268602 : epqstate->recheckestate = NULL;
2718 268602 : epqstate->recheckplanstate = NULL;
2719 268602 : epqstate->relsubs_rowmark = NULL;
2720 268602 : epqstate->relsubs_done = NULL;
2721 268602 : epqstate->relsubs_blocked = NULL;
2722 268602 : }
2723 :
2724 : /*
2725 : * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2726 : *
2727 : * We used to need this so that ModifyTable could deal with multiple subplans.
2728 : * It could now be refactored out of existence.
2729 : */
2730 : void
2731 115588 : EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
2732 : {
2733 : /* If we have a live EPQ query, shut it down */
2734 115588 : EvalPlanQualEnd(epqstate);
2735 : /* And set/change the plan pointer */
2736 115588 : epqstate->plan = subplan;
2737 : /* The rowmarks depend on the plan, too */
2738 115588 : epqstate->arowMarks = auxrowmarks;
2739 115588 : }
2740 :
2741 : /*
2742 : * Return, and create if necessary, a slot for an EPQ test tuple.
2743 : *
2744 : * Note this only requires EvalPlanQualInit() to have been called,
2745 : * EvalPlanQualBegin() is not necessary.
2746 : */
2747 : TupleTableSlot *
2748 17466 : EvalPlanQualSlot(EPQState *epqstate,
2749 : Relation relation, Index rti)
2750 : {
2751 : TupleTableSlot **slot;
2752 :
2753 : Assert(relation);
2754 : Assert(rti > 0 && rti <= epqstate->parentestate->es_range_table_size);
2755 17466 : slot = &epqstate->relsubs_slot[rti - 1];
2756 :
2757 17466 : if (*slot == NULL)
2758 : {
2759 : MemoryContext oldcontext;
2760 :
2761 5980 : oldcontext = MemoryContextSwitchTo(epqstate->parentestate->es_query_cxt);
2762 5980 : *slot = table_slot_create(relation, &epqstate->tuple_table);
2763 5980 : MemoryContextSwitchTo(oldcontext);
2764 : }
2765 :
2766 17466 : return *slot;
2767 : }
2768 :
2769 : /*
2770 : * Fetch the current row value for a non-locked relation, identified by rti,
2771 : * that needs to be scanned by an EvalPlanQual operation. origslot must have
2772 : * been set to contain the current result row (top-level row) that we need to
2773 : * recheck. Returns true if a substitution tuple was found, false if not.
2774 : */
2775 : bool
2776 26 : EvalPlanQualFetchRowMark(EPQState *epqstate, Index rti, TupleTableSlot *slot)
2777 : {
2778 26 : ExecAuxRowMark *earm = epqstate->relsubs_rowmark[rti - 1];
2779 : ExecRowMark *erm;
2780 : Datum datum;
2781 : bool isNull;
2782 :
2783 : Assert(earm != NULL);
2784 : Assert(epqstate->origslot != NULL);
2785 :
2786 26 : erm = earm->rowmark;
2787 :
2788 26 : if (RowMarkRequiresRowShareLock(erm->markType))
2789 0 : elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
2790 :
2791 : /* if child rel, must check whether it produced this row */
2792 26 : if (erm->rti != erm->prti)
2793 : {
2794 : Oid tableoid;
2795 :
2796 0 : datum = ExecGetJunkAttribute(epqstate->origslot,
2797 0 : earm->toidAttNo,
2798 : &isNull);
2799 : /* non-locked rels could be on the inside of outer joins */
2800 0 : if (isNull)
2801 0 : return false;
2802 :
2803 0 : tableoid = DatumGetObjectId(datum);
2804 :
2805 : Assert(OidIsValid(erm->relid));
2806 0 : if (tableoid != erm->relid)
2807 : {
2808 : /* this child is inactive right now */
2809 0 : return false;
2810 : }
2811 : }
2812 :
2813 26 : if (erm->markType == ROW_MARK_REFERENCE)
2814 : {
2815 : Assert(erm->relation != NULL);
2816 :
2817 : /* fetch the tuple's ctid */
2818 26 : datum = ExecGetJunkAttribute(epqstate->origslot,
2819 26 : earm->ctidAttNo,
2820 : &isNull);
2821 : /* non-locked rels could be on the inside of outer joins */
2822 26 : if (isNull)
2823 0 : return false;
2824 :
2825 : /* fetch requests on foreign tables must be passed to their FDW */
2826 26 : if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
2827 : {
2828 : FdwRoutine *fdwroutine;
2829 0 : bool updated = false;
2830 :
2831 0 : fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
2832 : /* this should have been checked already, but let's be safe */
2833 0 : if (fdwroutine->RefetchForeignRow == NULL)
2834 0 : ereport(ERROR,
2835 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2836 : errmsg("cannot lock rows in foreign table \"%s\"",
2837 : RelationGetRelationName(erm->relation))));
2838 :
2839 0 : fdwroutine->RefetchForeignRow(epqstate->recheckestate,
2840 : erm,
2841 : datum,
2842 : slot,
2843 : &updated);
2844 0 : if (TupIsNull(slot))
2845 0 : elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2846 :
2847 : /*
2848 : * Ideally we'd insist on updated == false here, but that assumes
2849 : * that FDWs can track that exactly, which they might not be able
2850 : * to. So just ignore the flag.
2851 : */
2852 0 : return true;
2853 : }
2854 : else
2855 : {
2856 : /* ordinary table, fetch the tuple */
2857 26 : if (!table_tuple_fetch_row_version(erm->relation,
2858 26 : (ItemPointer) DatumGetPointer(datum),
2859 : SnapshotAny, slot))
2860 0 : elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2861 26 : return true;
2862 : }
2863 : }
2864 : else
2865 : {
2866 : Assert(erm->markType == ROW_MARK_COPY);
2867 :
2868 : /* fetch the whole-row Var for the relation */
2869 0 : datum = ExecGetJunkAttribute(epqstate->origslot,
2870 0 : earm->wholeAttNo,
2871 : &isNull);
2872 : /* non-locked rels could be on the inside of outer joins */
2873 0 : if (isNull)
2874 0 : return false;
2875 :
2876 0 : ExecStoreHeapTupleDatum(datum, slot);
2877 0 : return true;
2878 : }
2879 : }
2880 :
2881 : /*
2882 : * Fetch the next row (if any) from EvalPlanQual testing
2883 : *
2884 : * (In practice, there should never be more than one row...)
2885 : */
2886 : TupleTableSlot *
2887 308 : EvalPlanQualNext(EPQState *epqstate)
2888 : {
2889 : MemoryContext oldcontext;
2890 : TupleTableSlot *slot;
2891 :
2892 308 : oldcontext = MemoryContextSwitchTo(epqstate->recheckestate->es_query_cxt);
2893 308 : slot = ExecProcNode(epqstate->recheckplanstate);
2894 308 : MemoryContextSwitchTo(oldcontext);
2895 :
2896 308 : return slot;
2897 : }
2898 :
2899 : /*
2900 : * Initialize or reset an EvalPlanQual state tree
2901 : */
2902 : void
2903 364 : EvalPlanQualBegin(EPQState *epqstate)
2904 : {
2905 364 : EState *parentestate = epqstate->parentestate;
2906 364 : EState *recheckestate = epqstate->recheckestate;
2907 :
2908 364 : if (recheckestate == NULL)
2909 : {
2910 : /* First time through, so create a child EState */
2911 220 : EvalPlanQualStart(epqstate, epqstate->plan);
2912 : }
2913 : else
2914 : {
2915 : /*
2916 : * We already have a suitable child EPQ tree, so just reset it.
2917 : */
2918 144 : Index rtsize = parentestate->es_range_table_size;
2919 144 : PlanState *rcplanstate = epqstate->recheckplanstate;
2920 :
2921 : /*
2922 : * Reset the relsubs_done[] flags to equal relsubs_blocked[], so that
2923 : * the EPQ run will never attempt to fetch tuples from blocked target
2924 : * relations.
2925 : */
2926 144 : memcpy(epqstate->relsubs_done, epqstate->relsubs_blocked,
2927 : rtsize * sizeof(bool));
2928 :
2929 : /* Recopy current values of parent parameters */
2930 144 : if (parentestate->es_plannedstmt->paramExecTypes != NIL)
2931 : {
2932 : int i;
2933 :
2934 : /*
2935 : * Force evaluation of any InitPlan outputs that could be needed
2936 : * by the subplan, just in case they got reset since
2937 : * EvalPlanQualStart (see comments therein).
2938 : */
2939 144 : ExecSetParamPlanMulti(rcplanstate->plan->extParam,
2940 144 : GetPerTupleExprContext(parentestate));
2941 :
2942 144 : i = list_length(parentestate->es_plannedstmt->paramExecTypes);
2943 :
2944 306 : while (--i >= 0)
2945 : {
2946 : /* copy value if any, but not execPlan link */
2947 162 : recheckestate->es_param_exec_vals[i].value =
2948 162 : parentestate->es_param_exec_vals[i].value;
2949 162 : recheckestate->es_param_exec_vals[i].isnull =
2950 162 : parentestate->es_param_exec_vals[i].isnull;
2951 : }
2952 : }
2953 :
2954 : /*
2955 : * Mark child plan tree as needing rescan at all scan nodes. The
2956 : * first ExecProcNode will take care of actually doing the rescan.
2957 : */
2958 144 : rcplanstate->chgParam = bms_add_member(rcplanstate->chgParam,
2959 : epqstate->epqParam);
2960 : }
2961 364 : }
2962 :
2963 : /*
2964 : * Start execution of an EvalPlanQual plan tree.
2965 : *
2966 : * This is a cut-down version of ExecutorStart(): we copy some state from
2967 : * the top-level estate rather than initializing it fresh.
2968 : */
2969 : static void
2970 220 : EvalPlanQualStart(EPQState *epqstate, Plan *planTree)
2971 : {
2972 220 : EState *parentestate = epqstate->parentestate;
2973 220 : Index rtsize = parentestate->es_range_table_size;
2974 : EState *rcestate;
2975 : MemoryContext oldcontext;
2976 : ListCell *l;
2977 :
2978 220 : epqstate->recheckestate = rcestate = CreateExecutorState();
2979 :
2980 220 : oldcontext = MemoryContextSwitchTo(rcestate->es_query_cxt);
2981 :
2982 : /* signal that this is an EState for executing EPQ */
2983 220 : rcestate->es_epq_active = epqstate;
2984 :
2985 : /*
2986 : * Child EPQ EStates share the parent's copy of unchanging state such as
2987 : * the snapshot, rangetable, and external Param info. They need their own
2988 : * copies of local state, including a tuple table, es_param_exec_vals,
2989 : * result-rel info, etc.
2990 : */
2991 220 : rcestate->es_direction = ForwardScanDirection;
2992 220 : rcestate->es_snapshot = parentestate->es_snapshot;
2993 220 : rcestate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
2994 220 : rcestate->es_range_table = parentestate->es_range_table;
2995 220 : rcestate->es_range_table_size = parentestate->es_range_table_size;
2996 220 : rcestate->es_relations = parentestate->es_relations;
2997 220 : rcestate->es_rowmarks = parentestate->es_rowmarks;
2998 220 : rcestate->es_rteperminfos = parentestate->es_rteperminfos;
2999 220 : rcestate->es_plannedstmt = parentestate->es_plannedstmt;
3000 220 : rcestate->es_junkFilter = parentestate->es_junkFilter;
3001 220 : rcestate->es_output_cid = parentestate->es_output_cid;
3002 220 : rcestate->es_queryEnv = parentestate->es_queryEnv;
3003 :
3004 : /*
3005 : * ResultRelInfos needed by subplans are initialized from scratch when the
3006 : * subplans themselves are initialized.
3007 : */
3008 220 : rcestate->es_result_relations = NULL;
3009 : /* es_trig_target_relations must NOT be copied */
3010 220 : rcestate->es_top_eflags = parentestate->es_top_eflags;
3011 220 : rcestate->es_instrument = parentestate->es_instrument;
3012 : /* es_auxmodifytables must NOT be copied */
3013 :
3014 : /*
3015 : * The external param list is simply shared from parent. The internal
3016 : * param workspace has to be local state, but we copy the initial values
3017 : * from the parent, so as to have access to any param values that were
3018 : * already set from other parts of the parent's plan tree.
3019 : */
3020 220 : rcestate->es_param_list_info = parentestate->es_param_list_info;
3021 220 : if (parentestate->es_plannedstmt->paramExecTypes != NIL)
3022 : {
3023 : int i;
3024 :
3025 : /*
3026 : * Force evaluation of any InitPlan outputs that could be needed by
3027 : * the subplan. (With more complexity, maybe we could postpone this
3028 : * till the subplan actually demands them, but it doesn't seem worth
3029 : * the trouble; this is a corner case already, since usually the
3030 : * InitPlans would have been evaluated before reaching EvalPlanQual.)
3031 : *
3032 : * This will not touch output params of InitPlans that occur somewhere
3033 : * within the subplan tree, only those that are attached to the
3034 : * ModifyTable node or above it and are referenced within the subplan.
3035 : * That's OK though, because the planner would only attach such
3036 : * InitPlans to a lower-level SubqueryScan node, and EPQ execution
3037 : * will not descend into a SubqueryScan.
3038 : *
3039 : * The EState's per-output-tuple econtext is sufficiently short-lived
3040 : * for this, since it should get reset before there is any chance of
3041 : * doing EvalPlanQual again.
3042 : */
3043 220 : ExecSetParamPlanMulti(planTree->extParam,
3044 220 : GetPerTupleExprContext(parentestate));
3045 :
3046 : /* now make the internal param workspace ... */
3047 220 : i = list_length(parentestate->es_plannedstmt->paramExecTypes);
3048 220 : rcestate->es_param_exec_vals = (ParamExecData *)
3049 220 : palloc0(i * sizeof(ParamExecData));
3050 : /* ... and copy down all values, whether really needed or not */
3051 540 : while (--i >= 0)
3052 : {
3053 : /* copy value if any, but not execPlan link */
3054 320 : rcestate->es_param_exec_vals[i].value =
3055 320 : parentestate->es_param_exec_vals[i].value;
3056 320 : rcestate->es_param_exec_vals[i].isnull =
3057 320 : parentestate->es_param_exec_vals[i].isnull;
3058 : }
3059 : }
3060 :
3061 : /*
3062 : * Copy es_unpruned_relids so that pruned relations are ignored by
3063 : * ExecInitLockRows() and ExecInitModifyTable() when initializing the plan
3064 : * trees below.
3065 : */
3066 220 : rcestate->es_unpruned_relids = parentestate->es_unpruned_relids;
3067 :
3068 : /*
3069 : * Initialize private state information for each SubPlan. We must do this
3070 : * before running ExecInitNode on the main query tree, since
3071 : * ExecInitSubPlan expects to be able to find these entries. Some of the
3072 : * SubPlans might not be used in the part of the plan tree we intend to
3073 : * run, but since it's not easy to tell which, we just initialize them
3074 : * all.
3075 : */
3076 : Assert(rcestate->es_subplanstates == NIL);
3077 278 : foreach(l, parentestate->es_plannedstmt->subplans)
3078 : {
3079 58 : Plan *subplan = (Plan *) lfirst(l);
3080 : PlanState *subplanstate;
3081 :
3082 58 : subplanstate = ExecInitNode(subplan, rcestate, 0);
3083 58 : rcestate->es_subplanstates = lappend(rcestate->es_subplanstates,
3084 : subplanstate);
3085 : }
3086 :
3087 : /*
3088 : * Build an RTI indexed array of rowmarks, so that
3089 : * EvalPlanQualFetchRowMark() can efficiently access the to be fetched
3090 : * rowmark.
3091 : */
3092 220 : epqstate->relsubs_rowmark = (ExecAuxRowMark **)
3093 220 : palloc0(rtsize * sizeof(ExecAuxRowMark *));
3094 232 : foreach(l, epqstate->arowMarks)
3095 : {
3096 12 : ExecAuxRowMark *earm = (ExecAuxRowMark *) lfirst(l);
3097 :
3098 12 : epqstate->relsubs_rowmark[earm->rowmark->rti - 1] = earm;
3099 : }
3100 :
3101 : /*
3102 : * Initialize per-relation EPQ tuple states. Result relations, if any,
3103 : * get marked as blocked; others as not-fetched.
3104 : */
3105 220 : epqstate->relsubs_done = palloc_array(bool, rtsize);
3106 220 : epqstate->relsubs_blocked = palloc0_array(bool, rtsize);
3107 :
3108 440 : foreach(l, epqstate->resultRelations)
3109 : {
3110 220 : int rtindex = lfirst_int(l);
3111 :
3112 : Assert(rtindex > 0 && rtindex <= rtsize);
3113 220 : epqstate->relsubs_blocked[rtindex - 1] = true;
3114 : }
3115 :
3116 220 : memcpy(epqstate->relsubs_done, epqstate->relsubs_blocked,
3117 : rtsize * sizeof(bool));
3118 :
3119 : /*
3120 : * Initialize the private state information for all the nodes in the part
3121 : * of the plan tree we need to run. This opens files, allocates storage
3122 : * and leaves us ready to start processing tuples.
3123 : */
3124 220 : epqstate->recheckplanstate = ExecInitNode(planTree, rcestate, 0);
3125 :
3126 220 : MemoryContextSwitchTo(oldcontext);
3127 220 : }
3128 :
3129 : /*
3130 : * EvalPlanQualEnd -- shut down at termination of parent plan state node,
3131 : * or if we are done with the current EPQ child.
3132 : *
3133 : * This is a cut-down version of ExecutorEnd(); basically we want to do most
3134 : * of the normal cleanup, but *not* close result relations (which we are
3135 : * just sharing from the outer query). We do, however, have to close any
3136 : * result and trigger target relations that got opened, since those are not
3137 : * shared. (There probably shouldn't be any of the latter, but just in
3138 : * case...)
3139 : */
3140 : void
3141 382628 : EvalPlanQualEnd(EPQState *epqstate)
3142 : {
3143 382628 : EState *estate = epqstate->recheckestate;
3144 : Index rtsize;
3145 : MemoryContext oldcontext;
3146 : ListCell *l;
3147 :
3148 382628 : rtsize = epqstate->parentestate->es_range_table_size;
3149 :
3150 : /*
3151 : * We may have a tuple table, even if EPQ wasn't started, because we allow
3152 : * use of EvalPlanQualSlot() without calling EvalPlanQualBegin().
3153 : */
3154 382628 : if (epqstate->tuple_table != NIL)
3155 : {
3156 5726 : memset(epqstate->relsubs_slot, 0,
3157 : rtsize * sizeof(TupleTableSlot *));
3158 5726 : ExecResetTupleTable(epqstate->tuple_table, true);
3159 5726 : epqstate->tuple_table = NIL;
3160 : }
3161 :
3162 : /* EPQ wasn't started, nothing further to do */
3163 382628 : if (estate == NULL)
3164 382420 : return;
3165 :
3166 208 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
3167 :
3168 208 : ExecEndNode(epqstate->recheckplanstate);
3169 :
3170 260 : foreach(l, estate->es_subplanstates)
3171 : {
3172 52 : PlanState *subplanstate = (PlanState *) lfirst(l);
3173 :
3174 52 : ExecEndNode(subplanstate);
3175 : }
3176 :
3177 : /* throw away the per-estate tuple table, some node may have used it */
3178 208 : ExecResetTupleTable(estate->es_tupleTable, false);
3179 :
3180 : /* Close any result and trigger target relations attached to this EState */
3181 208 : ExecCloseResultRelations(estate);
3182 :
3183 208 : MemoryContextSwitchTo(oldcontext);
3184 :
3185 208 : FreeExecutorState(estate);
3186 :
3187 : /* Mark EPQState idle */
3188 208 : epqstate->origslot = NULL;
3189 208 : epqstate->recheckestate = NULL;
3190 208 : epqstate->recheckplanstate = NULL;
3191 208 : epqstate->relsubs_rowmark = NULL;
3192 208 : epqstate->relsubs_done = NULL;
3193 208 : epqstate->relsubs_blocked = NULL;
3194 : }