Line data Source code
1 : /*-------------------------------------------------------------------------
2 : *
3 : * execMain.c
4 : * top level executor interface routines
5 : *
6 : * INTERFACE ROUTINES
7 : * ExecutorStart()
8 : * ExecutorRun()
9 : * ExecutorFinish()
10 : * ExecutorEnd()
11 : *
12 : * These four procedures are the external interface to the executor.
13 : * In each case, the query descriptor is required as an argument.
14 : *
15 : * ExecutorStart must be called at the beginning of execution of any
16 : * query plan and ExecutorEnd must always be called at the end of
17 : * execution of a plan (unless it is aborted due to error).
18 : *
19 : * ExecutorRun accepts direction and count arguments that specify whether
20 : * the plan is to be executed forwards, backwards, and for how many tuples.
21 : * In some cases ExecutorRun may be called multiple times to process all
22 : * the tuples for a plan. It is also acceptable to stop short of executing
23 : * the whole plan (but only if it is a SELECT).
24 : *
25 : * ExecutorFinish must be called after the final ExecutorRun call and
26 : * before ExecutorEnd. This can be omitted only in case of EXPLAIN,
27 : * which should also omit ExecutorRun.
28 : *
29 : * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
30 : * Portions Copyright (c) 1994, Regents of the University of California
31 : *
32 : *
33 : * IDENTIFICATION
34 : * src/backend/executor/execMain.c
35 : *
36 : *-------------------------------------------------------------------------
37 : */
38 : #include "postgres.h"
39 :
40 : #include "access/heapam.h"
41 : #include "access/htup_details.h"
42 : #include "access/sysattr.h"
43 : #include "access/tableam.h"
44 : #include "access/transam.h"
45 : #include "access/xact.h"
46 : #include "catalog/namespace.h"
47 : #include "catalog/partition.h"
48 : #include "catalog/pg_publication.h"
49 : #include "commands/matview.h"
50 : #include "commands/trigger.h"
51 : #include "executor/execdebug.h"
52 : #include "executor/nodeSubplan.h"
53 : #include "foreign/fdwapi.h"
54 : #include "jit/jit.h"
55 : #include "mb/pg_wchar.h"
56 : #include "miscadmin.h"
57 : #include "parser/parse_relation.h"
58 : #include "parser/parsetree.h"
59 : #include "storage/bufmgr.h"
60 : #include "storage/lmgr.h"
61 : #include "tcop/utility.h"
62 : #include "utils/acl.h"
63 : #include "utils/backend_status.h"
64 : #include "utils/lsyscache.h"
65 : #include "utils/memutils.h"
66 : #include "utils/partcache.h"
67 : #include "utils/rls.h"
68 : #include "utils/ruleutils.h"
69 : #include "utils/snapmgr.h"
70 :
71 :
/*
 * Hooks for plugins to get control in ExecutorStart/Run/Finish/End.
 * A plugin that installs one of these hooks is expected to call the
 * corresponding standard_Executor*() function from within the hook,
 * so that normal executor processing still happens.
 */
ExecutorStart_hook_type ExecutorStart_hook = NULL;
ExecutorRun_hook_type ExecutorRun_hook = NULL;
ExecutorFinish_hook_type ExecutorFinish_hook = NULL;
ExecutorEnd_hook_type ExecutorEnd_hook = NULL;

/*
 * Hook for plugin to get control in ExecCheckPermissions(); it is invoked
 * after the built-in per-relation permission checks have passed.
 */
ExecutorCheckPerms_hook_type ExecutorCheckPerms_hook = NULL;

/* decls for local routines only used within this module */
static void InitPlan(QueryDesc *queryDesc, int eflags);
static void CheckValidRowMarkRel(Relation rel, RowMarkType markType);
static void ExecPostprocessPlan(EState *estate);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(EState *estate, PlanState *planstate,
						bool use_parallel_mode,
						CmdType operation,
						bool sendTuples,
						uint64 numberTuples,
						ScanDirection direction,
						DestReceiver *dest,
						bool execute_once);
static bool ExecCheckOneRelPerms(RTEPermissionInfo *perminfo);
static bool ExecCheckPermissionsModified(Oid relOid, Oid userid,
										 Bitmapset *modifiedCols,
										 AclMode requiredPerms);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static char *ExecBuildSlotValueDescription(Oid reloid,
										   TupleTableSlot *slot,
										   TupleDesc tupdesc,
										   Bitmapset *modifiedCols,
										   int maxfieldlen);
static void EvalPlanQualStart(EPQState *epqstate, Plan *planTree);

/* end of local decls */
107 :
108 :
109 : /* ----------------------------------------------------------------
110 : * ExecutorStart
111 : *
112 : * This routine must be called at the beginning of any execution of any
113 : * query plan
114 : *
115 : * Takes a QueryDesc previously created by CreateQueryDesc (which is separate
116 : * only because some places use QueryDescs for utility commands). The tupDesc
117 : * field of the QueryDesc is filled in to describe the tuples that will be
118 : * returned, and the internal fields (estate and planstate) are set up.
119 : *
120 : * eflags contains flag bits as described in executor.h.
121 : *
122 : * NB: the CurrentMemoryContext when this is called will become the parent
123 : * of the per-query context used for this Executor invocation.
124 : *
125 : * We provide a function hook variable that lets loadable plugins
126 : * get control when ExecutorStart is called. Such a plugin would
127 : * normally call standard_ExecutorStart().
128 : *
129 : * ----------------------------------------------------------------
130 : */
131 : void
132 569120 : ExecutorStart(QueryDesc *queryDesc, int eflags)
133 : {
134 : /*
135 : * In some cases (e.g. an EXECUTE statement) a query execution will skip
136 : * parse analysis, which means that the query_id won't be reported. Note
137 : * that it's harmless to report the query_id multiple times, as the call
138 : * will be ignored if the top level query_id has already been reported.
139 : */
140 569120 : pgstat_report_query_id(queryDesc->plannedstmt->queryId, false);
141 :
142 569120 : if (ExecutorStart_hook)
143 88826 : (*ExecutorStart_hook) (queryDesc, eflags);
144 : else
145 480294 : standard_ExecutorStart(queryDesc, eflags);
146 567466 : }
147 :
148 : void
149 569120 : standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
150 : {
151 : EState *estate;
152 : MemoryContext oldcontext;
153 :
154 : /* sanity checks: queryDesc must not be started already */
155 : Assert(queryDesc != NULL);
156 : Assert(queryDesc->estate == NULL);
157 :
158 : /*
159 : * If the transaction is read-only, we need to check if any writes are
160 : * planned to non-temporary tables. EXPLAIN is considered read-only.
161 : *
162 : * Don't allow writes in parallel mode. Supporting UPDATE and DELETE
163 : * would require (a) storing the combo CID hash in shared memory, rather
164 : * than synchronizing it just once at the start of parallelism, and (b) an
165 : * alternative to heap_update()'s reliance on xmax for mutual exclusion.
166 : * INSERT may have no such troubles, but we forbid it to simplify the
167 : * checks.
168 : *
169 : * We have lower-level defenses in CommandCounterIncrement and elsewhere
170 : * against performing unsafe operations in parallel mode, but this gives a
171 : * more user-friendly error message.
172 : */
173 569120 : if ((XactReadOnly || IsInParallelMode()) &&
174 127608 : !(eflags & EXEC_FLAG_EXPLAIN_ONLY))
175 127608 : ExecCheckXactReadOnly(queryDesc->plannedstmt);
176 :
177 : /*
178 : * Build EState, switch into per-query memory context for startup.
179 : */
180 569104 : estate = CreateExecutorState();
181 569104 : queryDesc->estate = estate;
182 :
183 569104 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
184 :
185 : /*
186 : * Fill in external parameters, if any, from queryDesc; and allocate
187 : * workspace for internal parameters
188 : */
189 569104 : estate->es_param_list_info = queryDesc->params;
190 :
191 569104 : if (queryDesc->plannedstmt->paramExecTypes != NIL)
192 : {
193 : int nParamExec;
194 :
195 159978 : nParamExec = list_length(queryDesc->plannedstmt->paramExecTypes);
196 159978 : estate->es_param_exec_vals = (ParamExecData *)
197 159978 : palloc0(nParamExec * sizeof(ParamExecData));
198 : }
199 :
200 : /* We now require all callers to provide sourceText */
201 : Assert(queryDesc->sourceText != NULL);
202 569104 : estate->es_sourceText = queryDesc->sourceText;
203 :
204 : /*
205 : * Fill in the query environment, if any, from queryDesc.
206 : */
207 569104 : estate->es_queryEnv = queryDesc->queryEnv;
208 :
209 : /*
210 : * If non-read-only query, set the command ID to mark output tuples with
211 : */
212 569104 : switch (queryDesc->operation)
213 : {
214 465704 : case CMD_SELECT:
215 :
216 : /*
217 : * SELECT FOR [KEY] UPDATE/SHARE and modifying CTEs need to mark
218 : * tuples
219 : */
220 465704 : if (queryDesc->plannedstmt->rowMarks != NIL ||
221 458628 : queryDesc->plannedstmt->hasModifyingCTE)
222 7204 : estate->es_output_cid = GetCurrentCommandId(true);
223 :
224 : /*
225 : * A SELECT without modifying CTEs can't possibly queue triggers,
226 : * so force skip-triggers mode. This is just a marginal efficiency
227 : * hack, since AfterTriggerBeginQuery/AfterTriggerEndQuery aren't
228 : * all that expensive, but we might as well do it.
229 : */
230 465704 : if (!queryDesc->plannedstmt->hasModifyingCTE)
231 465576 : eflags |= EXEC_FLAG_SKIP_TRIGGERS;
232 465704 : break;
233 :
234 103400 : case CMD_INSERT:
235 : case CMD_DELETE:
236 : case CMD_UPDATE:
237 : case CMD_MERGE:
238 103400 : estate->es_output_cid = GetCurrentCommandId(true);
239 103400 : break;
240 :
241 0 : default:
242 0 : elog(ERROR, "unrecognized operation code: %d",
243 : (int) queryDesc->operation);
244 : break;
245 : }
246 :
247 : /*
248 : * Copy other important information into the EState
249 : */
250 569104 : estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot);
251 569104 : estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot);
252 569104 : estate->es_top_eflags = eflags;
253 569104 : estate->es_instrument = queryDesc->instrument_options;
254 569104 : estate->es_jit_flags = queryDesc->plannedstmt->jitFlags;
255 :
256 : /*
257 : * Set up an AFTER-trigger statement context, unless told not to, or
258 : * unless it's EXPLAIN-only mode (when ExecutorFinish won't be called).
259 : */
260 569104 : if (!(eflags & (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
261 102154 : AfterTriggerBeginQuery();
262 :
263 : /*
264 : * Initialize the plan state tree
265 : */
266 569104 : InitPlan(queryDesc, eflags);
267 :
268 567466 : MemoryContextSwitchTo(oldcontext);
269 567466 : }
270 :
271 : /* ----------------------------------------------------------------
272 : * ExecutorRun
273 : *
274 : * This is the main routine of the executor module. It accepts
275 : * the query descriptor from the traffic cop and executes the
276 : * query plan.
277 : *
278 : * ExecutorStart must have been called already.
279 : *
280 : * If direction is NoMovementScanDirection then nothing is done
281 : * except to start up/shut down the destination. Otherwise,
282 : * we retrieve up to 'count' tuples in the specified direction.
283 : *
284 : * Note: count = 0 is interpreted as no portal limit, i.e., run to
285 : * completion. Also note that the count limit is only applied to
286 : * retrieved tuples, not for instance to those inserted/updated/deleted
287 : * by a ModifyTable plan node.
288 : *
289 : * There is no return value, but output tuples (if any) are sent to
290 : * the destination receiver specified in the QueryDesc; and the number
291 : * of tuples processed at the top level can be found in
292 : * estate->es_processed. The total number of tuples processed in all
293 : * the ExecutorRun calls can be found in estate->es_total_processed.
294 : *
295 : * We provide a function hook variable that lets loadable plugins
296 : * get control when ExecutorRun is called. Such a plugin would
297 : * normally call standard_ExecutorRun().
298 : *
299 : * ----------------------------------------------------------------
300 : */
301 : void
302 562352 : ExecutorRun(QueryDesc *queryDesc,
303 : ScanDirection direction, uint64 count,
304 : bool execute_once)
305 : {
306 562352 : if (ExecutorRun_hook)
307 86576 : (*ExecutorRun_hook) (queryDesc, direction, count, execute_once);
308 : else
309 475776 : standard_ExecutorRun(queryDesc, direction, count, execute_once);
310 540562 : }
311 :
312 : void
313 562352 : standard_ExecutorRun(QueryDesc *queryDesc,
314 : ScanDirection direction, uint64 count, bool execute_once)
315 : {
316 : EState *estate;
317 : CmdType operation;
318 : DestReceiver *dest;
319 : bool sendTuples;
320 : MemoryContext oldcontext;
321 :
322 : /* sanity checks */
323 : Assert(queryDesc != NULL);
324 :
325 562352 : estate = queryDesc->estate;
326 :
327 : Assert(estate != NULL);
328 : Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
329 :
330 : /*
331 : * Switch into per-query memory context
332 : */
333 562352 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
334 :
335 : /* Allow instrumentation of Executor overall runtime */
336 562352 : if (queryDesc->totaltime)
337 54188 : InstrStartNode(queryDesc->totaltime);
338 :
339 : /*
340 : * extract information from the query descriptor and the query feature.
341 : */
342 562352 : operation = queryDesc->operation;
343 562352 : dest = queryDesc->dest;
344 :
345 : /*
346 : * startup tuple receiver, if we will be emitting tuples
347 : */
348 562352 : estate->es_processed = 0;
349 :
350 664048 : sendTuples = (operation == CMD_SELECT ||
351 101696 : queryDesc->plannedstmt->hasReturning);
352 :
353 562352 : if (sendTuples)
354 464302 : dest->rStartup(dest, operation, queryDesc->tupDesc);
355 :
356 : /*
357 : * run plan
358 : */
359 562314 : if (!ScanDirectionIsNoMovement(direction))
360 : {
361 561102 : if (execute_once && queryDesc->already_executed)
362 0 : elog(ERROR, "can't re-execute query flagged for single execution");
363 561102 : queryDesc->already_executed = true;
364 :
365 561102 : ExecutePlan(estate,
366 : queryDesc->planstate,
367 561102 : queryDesc->plannedstmt->parallelModeNeeded,
368 : operation,
369 : sendTuples,
370 : count,
371 : direction,
372 : dest,
373 : execute_once);
374 : }
375 :
376 : /*
377 : * Update es_total_processed to keep track of the number of tuples
378 : * processed across multiple ExecutorRun() calls.
379 : */
380 540562 : estate->es_total_processed += estate->es_processed;
381 :
382 : /*
383 : * shutdown tuple receiver, if we started it
384 : */
385 540562 : if (sendTuples)
386 445176 : dest->rShutdown(dest);
387 :
388 540562 : if (queryDesc->totaltime)
389 52254 : InstrStopNode(queryDesc->totaltime, estate->es_processed);
390 :
391 540562 : MemoryContextSwitchTo(oldcontext);
392 540562 : }
393 :
394 : /* ----------------------------------------------------------------
395 : * ExecutorFinish
396 : *
397 : * This routine must be called after the last ExecutorRun call.
398 : * It performs cleanup such as firing AFTER triggers. It is
399 : * separate from ExecutorEnd because EXPLAIN ANALYZE needs to
400 : * include these actions in the total runtime.
401 : *
402 : * We provide a function hook variable that lets loadable plugins
403 : * get control when ExecutorFinish is called. Such a plugin would
404 : * normally call standard_ExecutorFinish().
405 : *
406 : * ----------------------------------------------------------------
407 : */
408 : void
409 527910 : ExecutorFinish(QueryDesc *queryDesc)
410 : {
411 527910 : if (ExecutorFinish_hook)
412 77058 : (*ExecutorFinish_hook) (queryDesc);
413 : else
414 450852 : standard_ExecutorFinish(queryDesc);
415 527040 : }
416 :
417 : void
418 527910 : standard_ExecutorFinish(QueryDesc *queryDesc)
419 : {
420 : EState *estate;
421 : MemoryContext oldcontext;
422 :
423 : /* sanity checks */
424 : Assert(queryDesc != NULL);
425 :
426 527910 : estate = queryDesc->estate;
427 :
428 : Assert(estate != NULL);
429 : Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
430 :
431 : /* This should be run once and only once per Executor instance */
432 : Assert(!estate->es_finished);
433 :
434 : /* Switch into per-query memory context */
435 527910 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
436 :
437 : /* Allow instrumentation of Executor overall runtime */
438 527910 : if (queryDesc->totaltime)
439 52254 : InstrStartNode(queryDesc->totaltime);
440 :
441 : /* Run ModifyTable nodes to completion */
442 527910 : ExecPostprocessPlan(estate);
443 :
444 : /* Execute queued AFTER triggers, unless told not to */
445 527910 : if (!(estate->es_top_eflags & EXEC_FLAG_SKIP_TRIGGERS))
446 98438 : AfterTriggerEndQuery(estate);
447 :
448 527040 : if (queryDesc->totaltime)
449 52006 : InstrStopNode(queryDesc->totaltime, 0);
450 :
451 527040 : MemoryContextSwitchTo(oldcontext);
452 :
453 527040 : estate->es_finished = true;
454 527040 : }
455 :
456 : /* ----------------------------------------------------------------
457 : * ExecutorEnd
458 : *
459 : * This routine must be called at the end of execution of any
460 : * query plan
461 : *
462 : * We provide a function hook variable that lets loadable plugins
463 : * get control when ExecutorEnd is called. Such a plugin would
464 : * normally call standard_ExecutorEnd().
465 : *
466 : * ----------------------------------------------------------------
467 : */
468 : void
469 544428 : ExecutorEnd(QueryDesc *queryDesc)
470 : {
471 544428 : if (ExecutorEnd_hook)
472 81708 : (*ExecutorEnd_hook) (queryDesc);
473 : else
474 462720 : standard_ExecutorEnd(queryDesc);
475 544428 : }
476 :
477 : void
478 544428 : standard_ExecutorEnd(QueryDesc *queryDesc)
479 : {
480 : EState *estate;
481 : MemoryContext oldcontext;
482 :
483 : /* sanity checks */
484 : Assert(queryDesc != NULL);
485 :
486 544428 : estate = queryDesc->estate;
487 :
488 : Assert(estate != NULL);
489 :
490 : /*
491 : * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
492 : * Assert is needed because ExecutorFinish is new as of 9.1, and callers
493 : * might forget to call it.
494 : */
495 : Assert(estate->es_finished ||
496 : (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
497 :
498 : /*
499 : * Switch into per-query memory context to run ExecEndPlan
500 : */
501 544428 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
502 :
503 544428 : ExecEndPlan(queryDesc->planstate, estate);
504 :
505 : /* do away with our snapshots */
506 544428 : UnregisterSnapshot(estate->es_snapshot);
507 544428 : UnregisterSnapshot(estate->es_crosscheck_snapshot);
508 :
509 : /*
510 : * Must switch out of context before destroying it
511 : */
512 544428 : MemoryContextSwitchTo(oldcontext);
513 :
514 : /*
515 : * Release EState and per-query memory context. This should release
516 : * everything the executor has allocated.
517 : */
518 544428 : FreeExecutorState(estate);
519 :
520 : /* Reset queryDesc fields that no longer point to anything */
521 544428 : queryDesc->tupDesc = NULL;
522 544428 : queryDesc->estate = NULL;
523 544428 : queryDesc->planstate = NULL;
524 544428 : queryDesc->totaltime = NULL;
525 544428 : }
526 :
527 : /* ----------------------------------------------------------------
528 : * ExecutorRewind
529 : *
530 : * This routine may be called on an open queryDesc to rewind it
531 : * to the start.
532 : * ----------------------------------------------------------------
533 : */
534 : void
535 140 : ExecutorRewind(QueryDesc *queryDesc)
536 : {
537 : EState *estate;
538 : MemoryContext oldcontext;
539 :
540 : /* sanity checks */
541 : Assert(queryDesc != NULL);
542 :
543 140 : estate = queryDesc->estate;
544 :
545 : Assert(estate != NULL);
546 :
547 : /* It's probably not sensible to rescan updating queries */
548 : Assert(queryDesc->operation == CMD_SELECT);
549 :
550 : /*
551 : * Switch into per-query memory context
552 : */
553 140 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
554 :
555 : /*
556 : * rescan plan
557 : */
558 140 : ExecReScan(queryDesc->planstate);
559 :
560 140 : MemoryContextSwitchTo(oldcontext);
561 140 : }
562 :
563 :
564 : /*
565 : * ExecCheckPermissions
566 : * Check access permissions of relations mentioned in a query
567 : *
568 : * Returns true if permissions are adequate. Otherwise, throws an appropriate
569 : * error if ereport_on_violation is true, or simply returns false otherwise.
570 : *
571 : * Note that this does NOT address row-level security policies (aka: RLS). If
572 : * rows will be returned to the user as a result of this permission check
573 : * passing, then RLS also needs to be consulted (and check_enable_rls()).
574 : *
575 : * See rewrite/rowsecurity.c.
576 : *
577 : * NB: rangeTable is no longer used by us, but kept around for the hooks that
578 : * might still want to look at the RTEs.
579 : */
580 : bool
581 578482 : ExecCheckPermissions(List *rangeTable, List *rteperminfos,
582 : bool ereport_on_violation)
583 : {
584 : ListCell *l;
585 578482 : bool result = true;
586 :
587 : #ifdef USE_ASSERT_CHECKING
588 : Bitmapset *indexset = NULL;
589 :
590 : /* Check that rteperminfos is consistent with rangeTable */
591 : foreach(l, rangeTable)
592 : {
593 : RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
594 :
595 : if (rte->perminfoindex != 0)
596 : {
597 : /* Sanity checks */
598 :
599 : /*
600 : * Only relation RTEs and subquery RTEs that were once relation
601 : * RTEs (views) have their perminfoindex set.
602 : */
603 : Assert(rte->rtekind == RTE_RELATION ||
604 : (rte->rtekind == RTE_SUBQUERY &&
605 : rte->relkind == RELKIND_VIEW));
606 :
607 : (void) getRTEPermissionInfo(rteperminfos, rte);
608 : /* Many-to-one mapping not allowed */
609 : Assert(!bms_is_member(rte->perminfoindex, indexset));
610 : indexset = bms_add_member(indexset, rte->perminfoindex);
611 : }
612 : }
613 :
614 : /* All rteperminfos are referenced */
615 : Assert(bms_num_members(indexset) == list_length(rteperminfos));
616 : #endif
617 :
618 1040024 : foreach(l, rteperminfos)
619 : {
620 462886 : RTEPermissionInfo *perminfo = lfirst_node(RTEPermissionInfo, l);
621 :
622 : Assert(OidIsValid(perminfo->relid));
623 462886 : result = ExecCheckOneRelPerms(perminfo);
624 462886 : if (!result)
625 : {
626 1344 : if (ereport_on_violation)
627 1332 : aclcheck_error(ACLCHECK_NO_PRIV,
628 1332 : get_relkind_objtype(get_rel_relkind(perminfo->relid)),
629 1332 : get_rel_name(perminfo->relid));
630 12 : return false;
631 : }
632 : }
633 :
634 577138 : if (ExecutorCheckPerms_hook)
635 12 : result = (*ExecutorCheckPerms_hook) (rangeTable, rteperminfos,
636 : ereport_on_violation);
637 577138 : return result;
638 : }
639 :
640 : /*
641 : * ExecCheckOneRelPerms
642 : * Check access permissions for a single relation.
643 : */
644 : static bool
645 462886 : ExecCheckOneRelPerms(RTEPermissionInfo *perminfo)
646 : {
647 : AclMode requiredPerms;
648 : AclMode relPerms;
649 : AclMode remainingPerms;
650 : Oid userid;
651 462886 : Oid relOid = perminfo->relid;
652 :
653 462886 : requiredPerms = perminfo->requiredPerms;
654 : Assert(requiredPerms != 0);
655 :
656 : /*
657 : * userid to check as: current user unless we have a setuid indication.
658 : *
659 : * Note: GetUserId() is presently fast enough that there's no harm in
660 : * calling it separately for each relation. If that stops being true, we
661 : * could call it once in ExecCheckPermissions and pass the userid down
662 : * from there. But for now, no need for the extra clutter.
663 : */
664 925772 : userid = OidIsValid(perminfo->checkAsUser) ?
665 462886 : perminfo->checkAsUser : GetUserId();
666 :
667 : /*
668 : * We must have *all* the requiredPerms bits, but some of the bits can be
669 : * satisfied from column-level rather than relation-level permissions.
670 : * First, remove any bits that are satisfied by relation permissions.
671 : */
672 462886 : relPerms = pg_class_aclmask(relOid, userid, requiredPerms, ACLMASK_ALL);
673 462886 : remainingPerms = requiredPerms & ~relPerms;
674 462886 : if (remainingPerms != 0)
675 : {
676 2078 : int col = -1;
677 :
678 : /*
679 : * If we lack any permissions that exist only as relation permissions,
680 : * we can fail straight away.
681 : */
682 2078 : if (remainingPerms & ~(ACL_SELECT | ACL_INSERT | ACL_UPDATE))
683 120 : return false;
684 :
685 : /*
686 : * Check to see if we have the needed privileges at column level.
687 : *
688 : * Note: failures just report a table-level error; it would be nicer
689 : * to report a column-level error if we have some but not all of the
690 : * column privileges.
691 : */
692 1958 : if (remainingPerms & ACL_SELECT)
693 : {
694 : /*
695 : * When the query doesn't explicitly reference any columns (for
696 : * example, SELECT COUNT(*) FROM table), allow the query if we
697 : * have SELECT on any column of the rel, as per SQL spec.
698 : */
699 1302 : if (bms_is_empty(perminfo->selectedCols))
700 : {
701 48 : if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
702 : ACLMASK_ANY) != ACLCHECK_OK)
703 6 : return false;
704 : }
705 :
706 2174 : while ((col = bms_next_member(perminfo->selectedCols, col)) >= 0)
707 : {
708 : /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
709 1680 : AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
710 :
711 1680 : if (attno == InvalidAttrNumber)
712 : {
713 : /* Whole-row reference, must have priv on all cols */
714 54 : if (pg_attribute_aclcheck_all(relOid, userid, ACL_SELECT,
715 : ACLMASK_ALL) != ACLCHECK_OK)
716 30 : return false;
717 : }
718 : else
719 : {
720 1626 : if (pg_attribute_aclcheck(relOid, attno, userid,
721 : ACL_SELECT) != ACLCHECK_OK)
722 772 : return false;
723 : }
724 : }
725 : }
726 :
727 : /*
728 : * Basically the same for the mod columns, for both INSERT and UPDATE
729 : * privilege as specified by remainingPerms.
730 : */
731 1150 : if (remainingPerms & ACL_INSERT &&
732 296 : !ExecCheckPermissionsModified(relOid,
733 : userid,
734 : perminfo->insertedCols,
735 : ACL_INSERT))
736 164 : return false;
737 :
738 986 : if (remainingPerms & ACL_UPDATE &&
739 576 : !ExecCheckPermissionsModified(relOid,
740 : userid,
741 : perminfo->updatedCols,
742 : ACL_UPDATE))
743 252 : return false;
744 : }
745 461542 : return true;
746 : }
747 :
748 : /*
749 : * ExecCheckPermissionsModified
750 : * Check INSERT or UPDATE access permissions for a single relation (these
751 : * are processed uniformly).
752 : */
753 : static bool
754 872 : ExecCheckPermissionsModified(Oid relOid, Oid userid, Bitmapset *modifiedCols,
755 : AclMode requiredPerms)
756 : {
757 872 : int col = -1;
758 :
759 : /*
760 : * When the query doesn't explicitly update any columns, allow the query
761 : * if we have permission on any column of the rel. This is to handle
762 : * SELECT FOR UPDATE as well as possible corner cases in UPDATE.
763 : */
764 872 : if (bms_is_empty(modifiedCols))
765 : {
766 48 : if (pg_attribute_aclcheck_all(relOid, userid, requiredPerms,
767 : ACLMASK_ANY) != ACLCHECK_OK)
768 48 : return false;
769 : }
770 :
771 1394 : while ((col = bms_next_member(modifiedCols, col)) >= 0)
772 : {
773 : /* bit #s are offset by FirstLowInvalidHeapAttributeNumber */
774 938 : AttrNumber attno = col + FirstLowInvalidHeapAttributeNumber;
775 :
776 938 : if (attno == InvalidAttrNumber)
777 : {
778 : /* whole-row reference can't happen here */
779 0 : elog(ERROR, "whole-row update is not implemented");
780 : }
781 : else
782 : {
783 938 : if (pg_attribute_aclcheck(relOid, attno, userid,
784 : requiredPerms) != ACLCHECK_OK)
785 368 : return false;
786 : }
787 : }
788 456 : return true;
789 : }
790 :
791 : /*
792 : * Check that the query does not imply any writes to non-temp tables;
793 : * unless we're in parallel mode, in which case don't even allow writes
794 : * to temp tables.
795 : *
796 : * Note: in a Hot Standby this would need to reject writes to temp
797 : * tables just as we do in parallel mode; but an HS standby can't have created
798 : * any temp tables in the first place, so no need to check that.
799 : */
800 : static void
801 127608 : ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
802 : {
803 : ListCell *l;
804 :
805 : /*
806 : * Fail if write permissions are requested in parallel mode for table
807 : * (temp or non-temp), otherwise fail for any non-temp table.
808 : */
809 207044 : foreach(l, plannedstmt->permInfos)
810 : {
811 79452 : RTEPermissionInfo *perminfo = lfirst_node(RTEPermissionInfo, l);
812 :
813 79452 : if ((perminfo->requiredPerms & (~ACL_SELECT)) == 0)
814 79424 : continue;
815 :
816 28 : if (isTempNamespace(get_rel_namespace(perminfo->relid)))
817 12 : continue;
818 :
819 16 : PreventCommandIfReadOnly(CreateCommandName((Node *) plannedstmt));
820 : }
821 :
822 127592 : if (plannedstmt->commandType != CMD_SELECT || plannedstmt->hasModifyingCTE)
823 12 : PreventCommandIfParallelMode(CreateCommandName((Node *) plannedstmt));
824 127592 : }
825 :
826 :
827 : /* ----------------------------------------------------------------
828 : * InitPlan
829 : *
830 : * Initializes the query plan: open files, allocate storage
831 : * and start up the rule manager
832 : * ----------------------------------------------------------------
833 : */
834 : static void
835 569104 : InitPlan(QueryDesc *queryDesc, int eflags)
836 : {
837 569104 : CmdType operation = queryDesc->operation;
838 569104 : PlannedStmt *plannedstmt = queryDesc->plannedstmt;
839 569104 : Plan *plan = plannedstmt->planTree;
840 569104 : List *rangeTable = plannedstmt->rtable;
841 569104 : EState *estate = queryDesc->estate;
842 : PlanState *planstate;
843 : TupleDesc tupType;
844 : ListCell *l;
845 : int i;
846 :
847 : /*
848 : * Do permissions checks
849 : */
850 569104 : ExecCheckPermissions(rangeTable, plannedstmt->permInfos, true);
851 :
852 : /*
853 : * initialize the node's execution state
854 : */
855 567856 : ExecInitRangeTable(estate, rangeTable, plannedstmt->permInfos);
856 :
857 567856 : estate->es_plannedstmt = plannedstmt;
858 :
859 : /*
860 : * Next, build the ExecRowMark array from the PlanRowMark(s), if any.
861 : */
862 567856 : if (plannedstmt->rowMarks)
863 : {
864 8622 : estate->es_rowmarks = (ExecRowMark **)
865 8622 : palloc0(estate->es_range_table_size * sizeof(ExecRowMark *));
866 19642 : foreach(l, plannedstmt->rowMarks)
867 : {
868 11026 : PlanRowMark *rc = (PlanRowMark *) lfirst(l);
869 : Oid relid;
870 : Relation relation;
871 : ExecRowMark *erm;
872 :
873 : /* ignore "parent" rowmarks; they are irrelevant at runtime */
874 11026 : if (rc->isParent)
875 1550 : continue;
876 :
877 : /* get relation's OID (will produce InvalidOid if subquery) */
878 9476 : relid = exec_rt_fetch(rc->rti, estate)->relid;
879 :
880 : /* open relation, if we need to access it for this mark type */
881 9476 : switch (rc->markType)
882 : {
883 9000 : case ROW_MARK_EXCLUSIVE:
884 : case ROW_MARK_NOKEYEXCLUSIVE:
885 : case ROW_MARK_SHARE:
886 : case ROW_MARK_KEYSHARE:
887 : case ROW_MARK_REFERENCE:
888 9000 : relation = ExecGetRangeTableRelation(estate, rc->rti);
889 9000 : break;
890 476 : case ROW_MARK_COPY:
891 : /* no physical table access is required */
892 476 : relation = NULL;
893 476 : break;
894 0 : default:
895 0 : elog(ERROR, "unrecognized markType: %d", rc->markType);
896 : relation = NULL; /* keep compiler quiet */
897 : break;
898 : }
899 :
900 : /* Check that relation is a legal target for marking */
901 9476 : if (relation)
902 9000 : CheckValidRowMarkRel(relation, rc->markType);
903 :
904 9470 : erm = (ExecRowMark *) palloc(sizeof(ExecRowMark));
905 9470 : erm->relation = relation;
906 9470 : erm->relid = relid;
907 9470 : erm->rti = rc->rti;
908 9470 : erm->prti = rc->prti;
909 9470 : erm->rowmarkId = rc->rowmarkId;
910 9470 : erm->markType = rc->markType;
911 9470 : erm->strength = rc->strength;
912 9470 : erm->waitPolicy = rc->waitPolicy;
913 9470 : erm->ermActive = false;
914 9470 : ItemPointerSetInvalid(&(erm->curCtid));
915 9470 : erm->ermExtra = NULL;
916 :
917 : Assert(erm->rti > 0 && erm->rti <= estate->es_range_table_size &&
918 : estate->es_rowmarks[erm->rti - 1] == NULL);
919 :
920 9470 : estate->es_rowmarks[erm->rti - 1] = erm;
921 : }
922 : }
923 :
924 : /*
925 : * Initialize the executor's tuple table to empty.
926 : */
927 567850 : estate->es_tupleTable = NIL;
928 :
929 : /* signal that this EState is not used for EPQ */
930 567850 : estate->es_epq_active = NULL;
931 :
932 : /*
933 : * Initialize private state information for each SubPlan. We must do this
934 : * before running ExecInitNode on the main query tree, since
935 : * ExecInitSubPlan expects to be able to find these entries.
936 : */
937 : Assert(estate->es_subplanstates == NIL);
938 567850 : i = 1; /* subplan indices count from 1 */
939 602688 : foreach(l, plannedstmt->subplans)
940 : {
941 34838 : Plan *subplan = (Plan *) lfirst(l);
942 : PlanState *subplanstate;
943 : int sp_eflags;
944 :
945 : /*
946 : * A subplan will never need to do BACKWARD scan nor MARK/RESTORE. If
947 : * it is a parameterless subplan (not initplan), we suggest that it be
948 : * prepared to handle REWIND efficiently; otherwise there is no need.
949 : */
950 34838 : sp_eflags = eflags
951 : & ~(EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK);
952 34838 : if (bms_is_member(i, plannedstmt->rewindPlanIDs))
953 42 : sp_eflags |= EXEC_FLAG_REWIND;
954 :
955 34838 : subplanstate = ExecInitNode(subplan, estate, sp_eflags);
956 :
957 34838 : estate->es_subplanstates = lappend(estate->es_subplanstates,
958 : subplanstate);
959 :
960 34838 : i++;
961 : }
962 :
963 : /*
964 : * Initialize the private state information for all the nodes in the query
965 : * tree. This opens files, allocates storage and leaves us ready to start
966 : * processing tuples.
967 : */
968 567850 : planstate = ExecInitNode(plan, estate, eflags);
969 :
970 : /*
971 : * Get the tuple descriptor describing the type of tuples to return.
972 : */
973 567466 : tupType = ExecGetResultType(planstate);
974 :
975 : /*
976 : * Initialize the junk filter if needed. SELECT queries need a filter if
977 : * there are any junk attrs in the top-level tlist.
978 : */
979 567466 : if (operation == CMD_SELECT)
980 : {
981 465008 : bool junk_filter_needed = false;
982 : ListCell *tlist;
983 :
984 1521516 : foreach(tlist, plan->targetlist)
985 : {
986 1077598 : TargetEntry *tle = (TargetEntry *) lfirst(tlist);
987 :
988 1077598 : if (tle->resjunk)
989 : {
990 21090 : junk_filter_needed = true;
991 21090 : break;
992 : }
993 : }
994 :
995 465008 : if (junk_filter_needed)
996 : {
997 : JunkFilter *j;
998 : TupleTableSlot *slot;
999 :
1000 21090 : slot = ExecInitExtraTupleSlot(estate, NULL, &TTSOpsVirtual);
1001 21090 : j = ExecInitJunkFilter(planstate->plan->targetlist,
1002 : slot);
1003 21090 : estate->es_junkFilter = j;
1004 :
1005 : /* Want to return the cleaned tuple type */
1006 21090 : tupType = j->jf_cleanTupType;
1007 : }
1008 : }
1009 :
1010 567466 : queryDesc->tupDesc = tupType;
1011 567466 : queryDesc->planstate = planstate;
1012 567466 : }
1013 :
1014 : /*
1015 : * Check that a proposed result relation is a legal target for the operation
1016 : *
1017 : * Generally the parser and/or planner should have noticed any such mistake
1018 : * already, but let's make sure.
1019 : *
1020 : * Note: when changing this function, you probably also need to look at
1021 : * CheckValidRowMarkRel.
1022 : */
void
CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation)
{
	Relation	resultRel = resultRelInfo->ri_RelationDesc;
	TriggerDesc *trigDesc = resultRel->trigdesc;
	FdwRoutine *fdwroutine;

	/* Dispatch on the target's relkind; most non-table kinds just error out */
	switch (resultRel->rd_rel->relkind)
	{
		case RELKIND_RELATION:
		case RELKIND_PARTITIONED_TABLE:
			/* Plain/partitioned tables are fine, modulo replica identity */
			CheckCmdReplicaIdentity(resultRel, operation);
			break;
		case RELKIND_SEQUENCE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change sequence \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
		case RELKIND_TOASTVALUE:
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change TOAST relation \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
		case RELKIND_VIEW:

			/*
			 * Okay only if there's a suitable INSTEAD OF trigger. Messages
			 * here should match rewriteHandler.c's rewriteTargetView and
			 * RewriteQuery, except that we omit errdetail because we haven't
			 * got the information handy (and given that we really shouldn't
			 * get here anyway, it's not worth great exertion to get).
			 */
			switch (operation)
			{
				case CMD_INSERT:
					if (!trigDesc || !trigDesc->trig_insert_instead_row)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("cannot insert into view \"%s\"",
										RelationGetRelationName(resultRel)),
								 errhint("To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule.")));
					break;
				case CMD_UPDATE:
					if (!trigDesc || !trigDesc->trig_update_instead_row)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("cannot update view \"%s\"",
										RelationGetRelationName(resultRel)),
								 errhint("To enable updating the view, provide an INSTEAD OF UPDATE trigger or an unconditional ON UPDATE DO INSTEAD rule.")));
					break;
				case CMD_DELETE:
					if (!trigDesc || !trigDesc->trig_delete_instead_row)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("cannot delete from view \"%s\"",
										RelationGetRelationName(resultRel)),
								 errhint("To enable deleting from the view, provide an INSTEAD OF DELETE trigger or an unconditional ON DELETE DO INSTEAD rule.")));
					break;
				default:
					elog(ERROR, "unrecognized CmdType: %d", (int) operation);
					break;
			}
			break;
		case RELKIND_MATVIEW:
			/* Matviews are writable only during their own refresh */
			if (!MatViewIncrementalMaintenanceIsEnabled())
				ereport(ERROR,
						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
						 errmsg("cannot change materialized view \"%s\"",
								RelationGetRelationName(resultRel))));
			break;
		case RELKIND_FOREIGN_TABLE:

			/*
			 * Okay only if the FDW supports it.  ri_FdwRoutine was filled in
			 * by InitResultRelInfo for foreign tables, so it's safe to
			 * dereference here.  We check both that the required callback
			 * exists and (if the FDW reports per-relation updatability) that
			 * the specific operation is allowed on this relation.
			 */
			fdwroutine = resultRelInfo->ri_FdwRoutine;
			switch (operation)
			{
				case CMD_INSERT:
					if (fdwroutine->ExecForeignInsert == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot insert into foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_INSERT)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow inserts",
										RelationGetRelationName(resultRel))));
					break;
				case CMD_UPDATE:
					if (fdwroutine->ExecForeignUpdate == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot update foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_UPDATE)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow updates",
										RelationGetRelationName(resultRel))));
					break;
				case CMD_DELETE:
					if (fdwroutine->ExecForeignDelete == NULL)
						ereport(ERROR,
								(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
								 errmsg("cannot delete from foreign table \"%s\"",
										RelationGetRelationName(resultRel))));
					if (fdwroutine->IsForeignRelUpdatable != NULL &&
						(fdwroutine->IsForeignRelUpdatable(resultRel) & (1 << CMD_DELETE)) == 0)
						ereport(ERROR,
								(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
								 errmsg("foreign table \"%s\" does not allow deletes",
										RelationGetRelationName(resultRel))));
					break;
				default:
					elog(ERROR, "unrecognized CmdType: %d", (int) operation);
					break;
			}
			break;
		default:
			/* Any other relkind is never a valid modification target */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot change relation \"%s\"",
							RelationGetRelationName(resultRel))));
			break;
	}
}
1152 :
1153 : /*
1154 : * Check that a proposed rowmark target relation is a legal target
1155 : *
1156 : * In most cases parser and/or planner should have noticed this already, but
1157 : * they don't cover all cases.
1158 : */
static void
CheckValidRowMarkRel(Relation rel, RowMarkType markType)
{
	FdwRoutine *fdwroutine;

	/* Dispatch on relkind, analogously to CheckValidResultRel */
	switch (rel->rd_rel->relkind)
	{
		case RELKIND_RELATION:
		case RELKIND_PARTITIONED_TABLE:
			/* OK */
			break;
		case RELKIND_SEQUENCE:
			/* Must disallow this because we don't vacuum sequences */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in sequence \"%s\"",
							RelationGetRelationName(rel))));
			break;
		case RELKIND_TOASTVALUE:
			/* We could allow this, but there seems no good reason to */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in TOAST relation \"%s\"",
							RelationGetRelationName(rel))));
			break;
		case RELKIND_VIEW:
			/* Should not get here; planner should have expanded the view */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in view \"%s\"",
							RelationGetRelationName(rel))));
			break;
		case RELKIND_MATVIEW:
			/* Allow referencing a matview, but not actual locking clauses */
			if (markType != ROW_MARK_REFERENCE)
				ereport(ERROR,
						(errcode(ERRCODE_WRONG_OBJECT_TYPE),
						 errmsg("cannot lock rows in materialized view \"%s\"",
								RelationGetRelationName(rel))));
			break;
		case RELKIND_FOREIGN_TABLE:
			/* Okay only if the FDW supports it (i.e. can re-fetch a row) */
			fdwroutine = GetFdwRoutineForRelation(rel, false);
			if (fdwroutine->RefetchForeignRow == NULL)
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("cannot lock rows in foreign table \"%s\"",
								RelationGetRelationName(rel))));
			break;
		default:
			/* All remaining relkinds are invalid rowmark targets */
			ereport(ERROR,
					(errcode(ERRCODE_WRONG_OBJECT_TYPE),
					 errmsg("cannot lock rows in relation \"%s\"",
							RelationGetRelationName(rel))));
			break;
	}
}
1216 :
1217 : /*
1218 : * Initialize ResultRelInfo data for one result relation
1219 : *
1220 : * Caution: before Postgres 9.1, this function included the relkind checking
1221 : * that's now in CheckValidResultRel, and it also did ExecOpenIndices if
1222 : * appropriate. Be sure callers cover those needs.
1223 : */
void
InitResultRelInfo(ResultRelInfo *resultRelInfo,
				  Relation resultRelationDesc,
				  Index resultRelationIndex,
				  ResultRelInfo *partition_root_rri,
				  int instrument_options)
{
	/* Start from all-zeroes; fields not assigned below stay 0/NULL/false. */
	MemSet(resultRelInfo, 0, sizeof(ResultRelInfo));
	resultRelInfo->type = T_ResultRelInfo;
	resultRelInfo->ri_RangeTableIndex = resultRelationIndex;
	resultRelInfo->ri_RelationDesc = resultRelationDesc;
	/* Index info is filled in later by ExecOpenIndices, if the caller wants */
	resultRelInfo->ri_NumIndices = 0;
	resultRelInfo->ri_IndexRelationDescs = NULL;
	resultRelInfo->ri_IndexRelationInfo = NULL;
	/* make a copy so as not to depend on relcache info not changing... */
	resultRelInfo->ri_TrigDesc = CopyTriggerDesc(resultRelationDesc->trigdesc);
	if (resultRelInfo->ri_TrigDesc)
	{
		int			n = resultRelInfo->ri_TrigDesc->numtriggers;

		/* Per-trigger lookup/WHEN-expr caches, allocated zeroed */
		resultRelInfo->ri_TrigFunctions = (FmgrInfo *)
			palloc0(n * sizeof(FmgrInfo));
		resultRelInfo->ri_TrigWhenExprs = (ExprState **)
			palloc0(n * sizeof(ExprState *));
		/* Trigger timing instrumentation, only when requested */
		if (instrument_options)
			resultRelInfo->ri_TrigInstrument = InstrAlloc(n, instrument_options, false);
	}
	else
	{
		resultRelInfo->ri_TrigFunctions = NULL;
		resultRelInfo->ri_TrigWhenExprs = NULL;
		resultRelInfo->ri_TrigInstrument = NULL;
	}
	/* Foreign tables get their FDW callback table; others have none */
	if (resultRelationDesc->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
		resultRelInfo->ri_FdwRoutine = GetFdwRoutineForRelation(resultRelationDesc, true);
	else
		resultRelInfo->ri_FdwRoutine = NULL;

	/* The following fields are set later if needed */
	resultRelInfo->ri_RowIdAttNo = 0;
	resultRelInfo->ri_extraUpdatedCols = NULL;
	resultRelInfo->ri_projectNew = NULL;
	resultRelInfo->ri_newTupleSlot = NULL;
	resultRelInfo->ri_oldTupleSlot = NULL;
	resultRelInfo->ri_projectNewInfoValid = false;
	resultRelInfo->ri_FdwState = NULL;
	resultRelInfo->ri_usesFdwDirectModify = false;
	resultRelInfo->ri_ConstraintExprs = NULL;
	resultRelInfo->ri_GeneratedExprsI = NULL;
	resultRelInfo->ri_GeneratedExprsU = NULL;
	resultRelInfo->ri_projectReturning = NULL;
	resultRelInfo->ri_onConflictArbiterIndexes = NIL;
	resultRelInfo->ri_onConflict = NULL;
	resultRelInfo->ri_ReturningSlot = NULL;
	resultRelInfo->ri_TrigOldSlot = NULL;
	resultRelInfo->ri_TrigNewSlot = NULL;
	resultRelInfo->ri_matchedMergeAction = NIL;
	resultRelInfo->ri_notMatchedMergeAction = NIL;

	/*
	 * Only ExecInitPartitionInfo() and ExecInitPartitionDispatchInfo() pass
	 * non-NULL partition_root_rri. For child relations that are part of the
	 * initial query rather than being dynamically added by tuple routing,
	 * this field is filled in ExecInitModifyTable().
	 */
	resultRelInfo->ri_RootResultRelInfo = partition_root_rri;
	/* Set by ExecGetRootToChildMap */
	resultRelInfo->ri_RootToChildMap = NULL;
	resultRelInfo->ri_RootToChildMapValid = false;
	/* Set by ExecInitRoutingInfo */
	resultRelInfo->ri_PartitionTupleSlot = NULL;
	resultRelInfo->ri_ChildToRootMap = NULL;
	resultRelInfo->ri_ChildToRootMapValid = false;
	resultRelInfo->ri_CopyMultiInsertBuffer = NULL;
}
1299 :
1300 : /*
1301 : * ExecGetTriggerResultRel
1302 : * Get a ResultRelInfo for a trigger target relation.
1303 : *
1304 : * Most of the time, triggers are fired on one of the result relations of the
1305 : * query, and so we can just return a member of the es_result_relations array,
1306 : * or the es_tuple_routing_result_relations list (if any). (Note: in self-join
1307 : * situations there might be multiple members with the same OID; if so it
1308 : * doesn't matter which one we pick.)
1309 : *
1310 : * However, it is sometimes necessary to fire triggers on other relations;
1311 : * this happens mainly when an RI update trigger queues additional triggers
1312 : * on other relations, which will be processed in the context of the outer
1313 : * query. For efficiency's sake, we want to have a ResultRelInfo for those
1314 : * triggers too; that can avoid repeated re-opening of the relation. (It
1315 : * also provides a way for EXPLAIN ANALYZE to report the runtimes of such
1316 : * triggers.) So we make additional ResultRelInfo's as needed, and save them
1317 : * in es_trig_target_relations.
1318 : */
1319 : ResultRelInfo *
1320 7402 : ExecGetTriggerResultRel(EState *estate, Oid relid,
1321 : ResultRelInfo *rootRelInfo)
1322 : {
1323 : ResultRelInfo *rInfo;
1324 : ListCell *l;
1325 : Relation rel;
1326 : MemoryContext oldcontext;
1327 :
1328 : /* Search through the query result relations */
1329 9398 : foreach(l, estate->es_opened_result_relations)
1330 : {
1331 8142 : rInfo = lfirst(l);
1332 8142 : if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1333 6146 : return rInfo;
1334 : }
1335 :
1336 : /*
1337 : * Search through the result relations that were created during tuple
1338 : * routing, if any.
1339 : */
1340 1458 : foreach(l, estate->es_tuple_routing_result_relations)
1341 : {
1342 766 : rInfo = (ResultRelInfo *) lfirst(l);
1343 766 : if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1344 564 : return rInfo;
1345 : }
1346 :
1347 : /* Nope, but maybe we already made an extra ResultRelInfo for it */
1348 1034 : foreach(l, estate->es_trig_target_relations)
1349 : {
1350 372 : rInfo = (ResultRelInfo *) lfirst(l);
1351 372 : if (RelationGetRelid(rInfo->ri_RelationDesc) == relid)
1352 30 : return rInfo;
1353 : }
1354 : /* Nope, so we need a new one */
1355 :
1356 : /*
1357 : * Open the target relation's relcache entry. We assume that an
1358 : * appropriate lock is still held by the backend from whenever the trigger
1359 : * event got queued, so we need take no new lock here. Also, we need not
1360 : * recheck the relkind, so no need for CheckValidResultRel.
1361 : */
1362 662 : rel = table_open(relid, NoLock);
1363 :
1364 : /*
1365 : * Make the new entry in the right context.
1366 : */
1367 662 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
1368 662 : rInfo = makeNode(ResultRelInfo);
1369 662 : InitResultRelInfo(rInfo,
1370 : rel,
1371 : 0, /* dummy rangetable index */
1372 : rootRelInfo,
1373 : estate->es_instrument);
1374 662 : estate->es_trig_target_relations =
1375 662 : lappend(estate->es_trig_target_relations, rInfo);
1376 662 : MemoryContextSwitchTo(oldcontext);
1377 :
1378 : /*
1379 : * Currently, we don't need any index information in ResultRelInfos used
1380 : * only for triggers, so no need to call ExecOpenIndices.
1381 : */
1382 :
1383 662 : return rInfo;
1384 : }
1385 :
1386 : /*
1387 : * Return the ancestor relations of a given leaf partition result relation
1388 : * up to and including the query's root target relation.
1389 : *
1390 : * These work much like the ones opened by ExecGetTriggerResultRel, except
1391 : * that we need to keep them in a separate list.
1392 : *
1393 : * These are closed by ExecCloseResultRelations.
1394 : */
List *
ExecGetAncestorResultRels(EState *estate, ResultRelInfo *resultRelInfo)
{
	ResultRelInfo *rootRelInfo = resultRelInfo->ri_RootResultRelInfo;
	Relation	partRel = resultRelInfo->ri_RelationDesc;
	Oid			rootRelOid;

	/* Only leaf partitions have ancestors to report */
	if (!partRel->rd_rel->relispartition)
		elog(ERROR, "cannot find ancestors of a non-partition result relation");
	Assert(rootRelInfo != NULL);
	rootRelOid = RelationGetRelid(rootRelInfo->ri_RelationDesc);
	/* Build the list on first call; it's cached for later calls */
	if (resultRelInfo->ri_ancestorResultRels == NIL)
	{
		ListCell   *lc;
		List	   *oids = get_partition_ancestors(RelationGetRelid(partRel));
		List	   *ancResultRels = NIL;

		foreach(lc, oids)
		{
			Oid			ancOid = lfirst_oid(lc);
			Relation	ancRel;
			ResultRelInfo *rInfo;

			/*
			 * Ignore the root ancestor here, and use ri_RootResultRelInfo
			 * (below) for it instead. Also, we stop climbing up the
			 * hierarchy when we find the table that was mentioned in the
			 * query.
			 */
			if (ancOid == rootRelOid)
				break;

			/*
			 * All ancestors up to the root target relation must have been
			 * locked by the planner or AcquireExecutorLocks().
			 */
			ancRel = table_open(ancOid, NoLock);
			rInfo = makeNode(ResultRelInfo);

			/* dummy rangetable index */
			InitResultRelInfo(rInfo, ancRel, 0, NULL,
							  estate->es_instrument);
			ancResultRels = lappend(ancResultRels, rInfo);
		}
		/* The root target relation itself is always the last entry */
		ancResultRels = lappend(ancResultRels, rootRelInfo);
		resultRelInfo->ri_ancestorResultRels = ancResultRels;
	}

	/* We must have found some ancestor */
	Assert(resultRelInfo->ri_ancestorResultRels != NIL);

	return resultRelInfo->ri_ancestorResultRels;
}
1448 :
1449 : /* ----------------------------------------------------------------
1450 : * ExecPostprocessPlan
1451 : *
1452 : * Give plan nodes a final chance to execute before shutdown
1453 : * ----------------------------------------------------------------
1454 : */
1455 : static void
1456 527910 : ExecPostprocessPlan(EState *estate)
1457 : {
1458 : ListCell *lc;
1459 :
1460 : /*
1461 : * Make sure nodes run forward.
1462 : */
1463 527910 : estate->es_direction = ForwardScanDirection;
1464 :
1465 : /*
1466 : * Run any secondary ModifyTable nodes to completion, in case the main
1467 : * query did not fetch all rows from them. (We do this to ensure that
1468 : * such nodes have predictable results.)
1469 : */
1470 528730 : foreach(lc, estate->es_auxmodifytables)
1471 : {
1472 820 : PlanState *ps = (PlanState *) lfirst(lc);
1473 :
1474 : for (;;)
1475 138 : {
1476 : TupleTableSlot *slot;
1477 :
1478 : /* Reset the per-output-tuple exprcontext each time */
1479 958 : ResetPerTupleExprContext(estate);
1480 :
1481 958 : slot = ExecProcNode(ps);
1482 :
1483 958 : if (TupIsNull(slot))
1484 : break;
1485 : }
1486 : }
1487 527910 : }
1488 :
1489 : /* ----------------------------------------------------------------
1490 : * ExecEndPlan
1491 : *
1492 : * Cleans up the query plan -- closes files and frees up storage
1493 : *
1494 : * NOTE: we are no longer very worried about freeing storage per se
1495 : * in this code; FreeExecutorState should be guaranteed to release all
1496 : * memory that needs to be released. What we are worried about doing
1497 : * is closing relations and dropping buffer pins. Thus, for example,
1498 : * tuple tables must be cleared or dropped to ensure pins are released.
1499 : * ----------------------------------------------------------------
1500 : */
1501 : static void
1502 544428 : ExecEndPlan(PlanState *planstate, EState *estate)
1503 : {
1504 : ListCell *l;
1505 :
1506 : /*
1507 : * shut down the node-type-specific query processing
1508 : */
1509 544428 : ExecEndNode(planstate);
1510 :
1511 : /*
1512 : * for subplans too
1513 : */
1514 578726 : foreach(l, estate->es_subplanstates)
1515 : {
1516 34298 : PlanState *subplanstate = (PlanState *) lfirst(l);
1517 :
1518 34298 : ExecEndNode(subplanstate);
1519 : }
1520 :
1521 : /*
1522 : * destroy the executor's tuple table. Actually we only care about
1523 : * releasing buffer pins and tupdesc refcounts; there's no need to pfree
1524 : * the TupleTableSlots, since the containing memory context is about to go
1525 : * away anyway.
1526 : */
1527 544428 : ExecResetTupleTable(estate->es_tupleTable, false);
1528 :
1529 : /*
1530 : * Close any Relations that have been opened for range table entries or
1531 : * result relations.
1532 : */
1533 544428 : ExecCloseResultRelations(estate);
1534 544428 : ExecCloseRangeTableRelations(estate);
1535 544428 : }
1536 :
1537 : /*
1538 : * Close any relations that have been opened for ResultRelInfos.
1539 : */
1540 : void
1541 546116 : ExecCloseResultRelations(EState *estate)
1542 : {
1543 : ListCell *l;
1544 :
1545 : /*
1546 : * close indexes of result relation(s) if any. (Rels themselves are
1547 : * closed in ExecCloseRangeTableRelations())
1548 : *
1549 : * In addition, close the stub RTs that may be in each resultrel's
1550 : * ri_ancestorResultRels.
1551 : */
1552 650334 : foreach(l, estate->es_opened_result_relations)
1553 : {
1554 104218 : ResultRelInfo *resultRelInfo = lfirst(l);
1555 : ListCell *lc;
1556 :
1557 104218 : ExecCloseIndices(resultRelInfo);
1558 104446 : foreach(lc, resultRelInfo->ri_ancestorResultRels)
1559 : {
1560 228 : ResultRelInfo *rInfo = lfirst(lc);
1561 :
1562 : /*
1563 : * Ancestors with RTI > 0 (should only be the root ancestor) are
1564 : * closed by ExecCloseRangeTableRelations.
1565 : */
1566 228 : if (rInfo->ri_RangeTableIndex > 0)
1567 180 : continue;
1568 :
1569 48 : table_close(rInfo->ri_RelationDesc, NoLock);
1570 : }
1571 : }
1572 :
1573 : /* Close any relations that have been opened by ExecGetTriggerResultRel(). */
1574 546602 : foreach(l, estate->es_trig_target_relations)
1575 : {
1576 486 : ResultRelInfo *resultRelInfo = (ResultRelInfo *) lfirst(l);
1577 :
1578 : /*
1579 : * Assert this is a "dummy" ResultRelInfo, see above. Otherwise we
1580 : * might be issuing a duplicate close against a Relation opened by
1581 : * ExecGetRangeTableRelation.
1582 : */
1583 : Assert(resultRelInfo->ri_RangeTableIndex == 0);
1584 :
1585 : /*
1586 : * Since ExecGetTriggerResultRel doesn't call ExecOpenIndices for
1587 : * these rels, we needn't call ExecCloseIndices either.
1588 : */
1589 : Assert(resultRelInfo->ri_NumIndices == 0);
1590 :
1591 486 : table_close(resultRelInfo->ri_RelationDesc, NoLock);
1592 : }
1593 546116 : }
1594 :
1595 : /*
1596 : * Close all relations opened by ExecGetRangeTableRelation().
1597 : *
1598 : * We do not release any locks we might hold on those rels.
1599 : */
1600 : void
1601 545740 : ExecCloseRangeTableRelations(EState *estate)
1602 : {
1603 : int i;
1604 :
1605 1508770 : for (i = 0; i < estate->es_range_table_size; i++)
1606 : {
1607 963030 : if (estate->es_relations[i])
1608 433304 : table_close(estate->es_relations[i], NoLock);
1609 : }
1610 545740 : }
1611 :
1612 : /* ----------------------------------------------------------------
1613 : * ExecutePlan
1614 : *
1615 : * Processes the query plan until we have retrieved 'numberTuples' tuples,
1616 : * moving in the specified direction.
1617 : *
1618 : * Runs to completion if numberTuples is 0
1619 : *
1620 : * Note: the ctid attribute is a 'junk' attribute that is removed before the
1621 : * user can see it
1622 : * ----------------------------------------------------------------
1623 : */
static void
ExecutePlan(EState *estate,
			PlanState *planstate,
			bool use_parallel_mode,
			CmdType operation,
			bool sendTuples,
			uint64 numberTuples,
			ScanDirection direction,
			DestReceiver *dest,
			bool execute_once)
{
	TupleTableSlot *slot;
	uint64		current_tuple_count;

	/*
	 * initialize local variables
	 */
	current_tuple_count = 0;

	/*
	 * Set the direction.
	 */
	estate->es_direction = direction;

	/*
	 * If the plan might potentially be executed multiple times, we must force
	 * it to run without parallelism, because we might exit early.
	 */
	if (!execute_once)
		use_parallel_mode = false;

	estate->es_use_parallel_mode = use_parallel_mode;
	if (use_parallel_mode)
		EnterParallelMode();

	/*
	 * Loop until we've processed the proper number of tuples from the plan.
	 */
	for (;;)
	{
		/* Reset the per-output-tuple exprcontext */
		ResetPerTupleExprContext(estate);

		/*
		 * Execute the plan and obtain a tuple
		 */
		slot = ExecProcNode(planstate);

		/*
		 * if the tuple is null, then we assume there is nothing more to
		 * process so we just end the loop...
		 */
		if (TupIsNull(slot))
			break;

		/*
		 * If we have a junk filter, then project a new tuple with the junk
		 * removed.
		 *
		 * Store this new "clean" tuple in the junkfilter's resultSlot.
		 * (Formerly, we stored it back over the "dirty" tuple, which is WRONG
		 * because that tuple slot has the wrong descriptor.)
		 */
		if (estate->es_junkFilter != NULL)
			slot = ExecFilterJunk(estate->es_junkFilter, slot);

		/*
		 * If we are supposed to send the tuple somewhere, do so. (In
		 * practice, this is probably always the case at this point.)
		 */
		if (sendTuples)
		{
			/*
			 * If we are not able to send the tuple, we assume the destination
			 * has closed and no more tuples can be sent. If that's the case,
			 * end the loop.
			 */
			if (!dest->receiveSlot(slot, dest))
				break;
		}

		/*
		 * Count tuples processed, if this is a SELECT. (For other operation
		 * types, the ModifyTable plan node must count the appropriate
		 * events.)
		 */
		if (operation == CMD_SELECT)
			(estate->es_processed)++;

		/*
		 * check our tuple count.. if we've processed the proper number then
		 * quit, else loop again and process more tuples. Zero numberTuples
		 * means no limit.
		 */
		current_tuple_count++;
		if (numberTuples && numberTuples == current_tuple_count)
			break;
	}

	/*
	 * If we know we won't need to back up, we can release resources at this
	 * point.
	 */
	if (!(estate->es_top_eflags & EXEC_FLAG_BACKWARD))
		ExecShutdownNode(planstate);

	/* Exit parallel mode if we entered it above */
	if (use_parallel_mode)
		ExitParallelMode();
}
1733 :
1734 :
1735 : /*
1736 : * ExecRelCheck --- check that tuple meets constraints for result relation
1737 : *
1738 : * Returns NULL if OK, else name of failed check constraint
1739 : */
static const char *
ExecRelCheck(ResultRelInfo *resultRelInfo,
			 TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	int			ncheck = rel->rd_att->constr->num_check;
	ConstrCheck *check = rel->rd_att->constr->check;
	ExprContext *econtext;
	MemoryContext oldContext;
	int			i;

	/*
	 * CheckConstraintFetch let this pass with only a warning, but now we
	 * should fail rather than possibly failing to enforce an important
	 * constraint.
	 */
	if (ncheck != rel->rd_rel->relchecks)
		elog(ERROR, "%d pg_constraint record(s) missing for relation \"%s\"",
			 rel->rd_rel->relchecks - ncheck, RelationGetRelationName(rel));

	/*
	 * If first time through for this result relation, build expression
	 * nodetrees for rel's constraint expressions.  Keep them in the per-query
	 * memory context so they'll survive throughout the query.  Subsequent
	 * calls reuse the cached ri_ConstraintExprs array.
	 */
	if (resultRelInfo->ri_ConstraintExprs == NULL)
	{
		oldContext = MemoryContextSwitchTo(estate->es_query_cxt);
		resultRelInfo->ri_ConstraintExprs =
			(ExprState **) palloc(ncheck * sizeof(ExprState *));
		for (i = 0; i < ncheck; i++)
		{
			Expr	   *checkconstr;

			/* deserialize the stored constraint expression, then compile it */
			checkconstr = stringToNode(check[i].ccbin);
			resultRelInfo->ri_ConstraintExprs[i] =
				ExecPrepareExpr(checkconstr, estate);
		}
		MemoryContextSwitchTo(oldContext);
	}

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/* And evaluate the constraints */
	for (i = 0; i < ncheck; i++)
	{
		ExprState  *checkconstr = resultRelInfo->ri_ConstraintExprs[i];

		/*
		 * NOTE: SQL specifies that a NULL result from a constraint expression
		 * is not to be treated as a failure.  Therefore, use ExecCheck not
		 * ExecQual.
		 */
		if (!ExecCheck(checkconstr, econtext))
			/* constraint failed: report its name to the caller */
			return check[i].ccname;
	}

	/* NULL result means no error */
	return NULL;
}
1807 :
1808 : /*
1809 : * ExecPartitionCheck --- check that tuple meets the partition constraint.
1810 : *
1811 : * Returns true if it meets the partition constraint. If the constraint
1812 : * fails and we're asked to emit an error, do so and don't return; otherwise
1813 : * return false.
1814 : */
1815 : bool
1816 14780 : ExecPartitionCheck(ResultRelInfo *resultRelInfo, TupleTableSlot *slot,
1817 : EState *estate, bool emitError)
1818 : {
1819 : ExprContext *econtext;
1820 : bool success;
1821 :
1822 : /*
1823 : * If first time through, build expression state tree for the partition
1824 : * check expression. (In the corner case where the partition check
1825 : * expression is empty, ie there's a default partition and nothing else,
1826 : * we'll be fooled into executing this code each time through. But it's
1827 : * pretty darn cheap in that case, so we don't worry about it.)
1828 : */
1829 14780 : if (resultRelInfo->ri_PartitionCheckExpr == NULL)
1830 : {
1831 : /*
1832 : * Ensure that the qual tree and prepared expression are in the
1833 : * query-lifespan context.
1834 : */
1835 5158 : MemoryContext oldcxt = MemoryContextSwitchTo(estate->es_query_cxt);
1836 5158 : List *qual = RelationGetPartitionQual(resultRelInfo->ri_RelationDesc);
1837 :
1838 5158 : resultRelInfo->ri_PartitionCheckExpr = ExecPrepareCheck(qual, estate);
1839 5158 : MemoryContextSwitchTo(oldcxt);
1840 : }
1841 :
1842 : /*
1843 : * We will use the EState's per-tuple context for evaluating constraint
1844 : * expressions (creating it if it's not already there).
1845 : */
1846 14780 : econtext = GetPerTupleExprContext(estate);
1847 :
1848 : /* Arrange for econtext's scan tuple to be the tuple under test */
1849 14780 : econtext->ecxt_scantuple = slot;
1850 :
1851 : /*
1852 : * As in case of the catalogued constraints, we treat a NULL result as
1853 : * success here, not a failure.
1854 : */
1855 14780 : success = ExecCheck(resultRelInfo->ri_PartitionCheckExpr, econtext);
1856 :
1857 : /* if asked to emit error, don't actually return on failure */
1858 14780 : if (!success && emitError)
1859 202 : ExecPartitionCheckEmitError(resultRelInfo, slot, estate);
1860 :
1861 14578 : return success;
1862 : }
1863 :
1864 : /*
1865 : * ExecPartitionCheckEmitError - Form and emit an error message after a failed
1866 : * partition constraint check.
1867 : */
1868 : void
1869 244 : ExecPartitionCheckEmitError(ResultRelInfo *resultRelInfo,
1870 : TupleTableSlot *slot,
1871 : EState *estate)
1872 : {
1873 : Oid root_relid;
1874 : TupleDesc tupdesc;
1875 : char *val_desc;
1876 : Bitmapset *modifiedCols;
1877 :
1878 : /*
1879 : * If the tuple has been routed, it's been converted to the partition's
1880 : * rowtype, which might differ from the root table's. We must convert it
1881 : * back to the root table's rowtype so that val_desc in the error message
1882 : * matches the input tuple.
1883 : */
1884 244 : if (resultRelInfo->ri_RootResultRelInfo)
1885 : {
1886 20 : ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
1887 : TupleDesc old_tupdesc;
1888 : AttrMap *map;
1889 :
1890 20 : root_relid = RelationGetRelid(rootrel->ri_RelationDesc);
1891 20 : tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
1892 :
1893 20 : old_tupdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
1894 : /* a reverse map */
1895 20 : map = build_attrmap_by_name_if_req(old_tupdesc, tupdesc, false);
1896 :
1897 : /*
1898 : * Partition-specific slot's tupdesc can't be changed, so allocate a
1899 : * new one.
1900 : */
1901 20 : if (map != NULL)
1902 8 : slot = execute_attr_map_slot(map, slot,
1903 : MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
1904 20 : modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
1905 20 : ExecGetUpdatedCols(rootrel, estate));
1906 : }
1907 : else
1908 : {
1909 224 : root_relid = RelationGetRelid(resultRelInfo->ri_RelationDesc);
1910 224 : tupdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
1911 224 : modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
1912 224 : ExecGetUpdatedCols(resultRelInfo, estate));
1913 : }
1914 :
1915 244 : val_desc = ExecBuildSlotValueDescription(root_relid,
1916 : slot,
1917 : tupdesc,
1918 : modifiedCols,
1919 : 64);
1920 244 : ereport(ERROR,
1921 : (errcode(ERRCODE_CHECK_VIOLATION),
1922 : errmsg("new row for relation \"%s\" violates partition constraint",
1923 : RelationGetRelationName(resultRelInfo->ri_RelationDesc)),
1924 : val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
1925 : errtable(resultRelInfo->ri_RelationDesc)));
1926 : }
1927 :
/*
 * ExecConstraints - check constraints of the tuple in 'slot'
 *
 * This checks the traditional NOT NULL and check constraints.
 *
 * The partition constraint is *NOT* checked.
 *
 * Note: 'slot' contains the tuple to check the constraints of, which may
 * have been converted from the original input tuple after tuple routing.
 * 'resultRelInfo' is the final result relation, after tuple routing.
 *
 * On the first violated constraint this throws ERROR (with
 * ERRCODE_NOT_NULL_VIOLATION or ERRCODE_CHECK_VIOLATION) and does not
 * return; if everything passes it returns normally.
 */
void
ExecConstraints(ResultRelInfo *resultRelInfo,
				TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	TupleConstr *constr = tupdesc->constr;
	Bitmapset  *modifiedCols;

	Assert(constr);				/* we should not be called otherwise */

	if (constr->has_not_null)
	{
		int			natts = tupdesc->natts;
		int			attrChk;

		/* Scan every attribute; attribute numbers are 1-based */
		for (attrChk = 1; attrChk <= natts; attrChk++)
		{
			Form_pg_attribute att = TupleDescAttr(tupdesc, attrChk - 1);

			if (att->attnotnull && slot_attisnull(slot, attrChk))
			{
				char	   *val_desc;
				Relation	orig_rel = rel;
				TupleDesc	orig_tupdesc = RelationGetDescr(rel);

				/*
				 * If the tuple has been routed, it's been converted to the
				 * partition's rowtype, which might differ from the root
				 * table's.  We must convert it back to the root table's
				 * rowtype so that val_desc shown error message matches the
				 * input tuple.
				 */
				if (resultRelInfo->ri_RootResultRelInfo)
				{
					ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
					AttrMap    *map;

					tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
					/* a reverse map */
					map = build_attrmap_by_name_if_req(orig_tupdesc,
													   tupdesc,
													   false);

					/*
					 * Partition-specific slot's tupdesc can't be changed, so
					 * allocate a new one.
					 */
					if (map != NULL)
						slot = execute_attr_map_slot(map, slot,
													 MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
					modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
											 ExecGetUpdatedCols(rootrel, estate));
					rel = rootrel->ri_RelationDesc;
				}
				else
					modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
											 ExecGetUpdatedCols(resultRelInfo, estate));
				/* may be NULL if the user can't see any of the columns */
				val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
														 slot,
														 tupdesc,
														 modifiedCols,
														 64);

				ereport(ERROR,
						(errcode(ERRCODE_NOT_NULL_VIOLATION),
						 errmsg("null value in column \"%s\" of relation \"%s\" violates not-null constraint",
								NameStr(att->attname),
								RelationGetRelationName(orig_rel)),
						 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
						 errtablecol(orig_rel, attrChk)));
			}
		}
	}

	if (rel->rd_rel->relchecks > 0)
	{
		const char *failed;

		/* ExecRelCheck returns the name of the first failed CHECK, or NULL */
		if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
		{
			char	   *val_desc;
			Relation	orig_rel = rel;

			/* See the comment above. */
			if (resultRelInfo->ri_RootResultRelInfo)
			{
				ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
				TupleDesc	old_tupdesc = RelationGetDescr(rel);
				AttrMap    *map;

				tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
				/* a reverse map */
				map = build_attrmap_by_name_if_req(old_tupdesc,
												   tupdesc,
												   false);

				/*
				 * Partition-specific slot's tupdesc can't be changed, so
				 * allocate a new one.
				 */
				if (map != NULL)
					slot = execute_attr_map_slot(map, slot,
												 MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));
				modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
										 ExecGetUpdatedCols(rootrel, estate));
				rel = rootrel->ri_RelationDesc;
			}
			else
				modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
										 ExecGetUpdatedCols(resultRelInfo, estate));
			/* may be NULL if the user can't see any of the columns */
			val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
													 slot,
													 tupdesc,
													 modifiedCols,
													 64);
			ereport(ERROR,
					(errcode(ERRCODE_CHECK_VIOLATION),
					 errmsg("new row for relation \"%s\" violates check constraint \"%s\"",
							RelationGetRelationName(orig_rel), failed),
					 val_desc ? errdetail("Failing row contains %s.", val_desc) : 0,
					 errtableconstraint(orig_rel, failed)));
		}
	}
}
2064 :
/*
 * ExecWithCheckOptions -- check that tuple satisfies any WITH CHECK OPTIONs
 * of the specified kind.
 *
 * Note that this needs to be called multiple times to ensure that all kinds of
 * WITH CHECK OPTIONs are handled (both those from views which have the WITH
 * CHECK OPTION set and from row-level security policies).  See ExecInsert()
 * and ExecUpdate().
 *
 * Throws ERROR on the first violated option and does not return; the error
 * code and message depend on the WCO kind.  Row contents are included only
 * for view checks, since for RLS violations the user may not be entitled to
 * see the tuple at all.
 */
void
ExecWithCheckOptions(WCOKind kind, ResultRelInfo *resultRelInfo,
					 TupleTableSlot *slot, EState *estate)
{
	Relation	rel = resultRelInfo->ri_RelationDesc;
	TupleDesc	tupdesc = RelationGetDescr(rel);
	ExprContext *econtext;
	ListCell   *l1,
			   *l2;

	/*
	 * We will use the EState's per-tuple context for evaluating constraint
	 * expressions (creating it if it's not already there).
	 */
	econtext = GetPerTupleExprContext(estate);

	/* Arrange for econtext's scan tuple to be the tuple under test */
	econtext->ecxt_scantuple = slot;

	/* Check each of the constraints; the two lists walk in lockstep */
	forboth(l1, resultRelInfo->ri_WithCheckOptions,
			l2, resultRelInfo->ri_WithCheckOptionExprs)
	{
		WithCheckOption *wco = (WithCheckOption *) lfirst(l1);
		ExprState  *wcoExpr = (ExprState *) lfirst(l2);

		/*
		 * Skip any WCOs which are not the kind we are looking for at this
		 * time.
		 */
		if (wco->kind != kind)
			continue;

		/*
		 * WITH CHECK OPTION checks are intended to ensure that the new tuple
		 * is visible (in the case of a view) or that it passes the
		 * 'with-check' policy (in the case of row security). If the qual
		 * evaluates to NULL or FALSE, then the new tuple won't be included in
		 * the view or doesn't pass the 'with-check' policy for the table.
		 */
		if (!ExecQual(wcoExpr, econtext))
		{
			char	   *val_desc;
			Bitmapset  *modifiedCols;

			switch (wco->kind)
			{
					/*
					 * For WITH CHECK OPTIONs coming from views, we might be
					 * able to provide the details on the row, depending on
					 * the permissions on the relation (that is, if the user
					 * could view it directly anyway).  For RLS violations, we
					 * don't include the data since we don't know if the user
					 * should be able to view the tuple as that depends on the
					 * USING policy.
					 */
				case WCO_VIEW_CHECK:
					/* See the comment in ExecConstraints(). */
					if (resultRelInfo->ri_RootResultRelInfo)
					{
						ResultRelInfo *rootrel = resultRelInfo->ri_RootResultRelInfo;
						TupleDesc	old_tupdesc = RelationGetDescr(rel);
						AttrMap    *map;

						tupdesc = RelationGetDescr(rootrel->ri_RelationDesc);
						/* a reverse map */
						map = build_attrmap_by_name_if_req(old_tupdesc,
														   tupdesc,
														   false);

						/*
						 * Partition-specific slot's tupdesc can't be changed,
						 * so allocate a new one.
						 */
						if (map != NULL)
							slot = execute_attr_map_slot(map, slot,
														 MakeTupleTableSlot(tupdesc, &TTSOpsVirtual));

						modifiedCols = bms_union(ExecGetInsertedCols(rootrel, estate),
												 ExecGetUpdatedCols(rootrel, estate));
						rel = rootrel->ri_RelationDesc;
					}
					else
						modifiedCols = bms_union(ExecGetInsertedCols(resultRelInfo, estate),
												 ExecGetUpdatedCols(resultRelInfo, estate));
					/* may be NULL if the user can't see any of the columns */
					val_desc = ExecBuildSlotValueDescription(RelationGetRelid(rel),
															 slot,
															 tupdesc,
															 modifiedCols,
															 64);

					ereport(ERROR,
							(errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION),
							 errmsg("new row violates check option for view \"%s\"",
									wco->relname),
							 val_desc ? errdetail("Failing row contains %s.",
												  val_desc) : 0));
					break;
				case WCO_RLS_INSERT_CHECK:
				case WCO_RLS_UPDATE_CHECK:
					if (wco->polname != NULL)
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
										wco->polname, wco->relname)));
					else
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy for table \"%s\"",
										wco->relname)));
					break;
				case WCO_RLS_MERGE_UPDATE_CHECK:
				case WCO_RLS_MERGE_DELETE_CHECK:
					if (wco->polname != NULL)
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("target row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
										wco->polname, wco->relname)));
					else
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("target row violates row-level security policy (USING expression) for table \"%s\"",
										wco->relname)));
					break;
				case WCO_RLS_CONFLICT_CHECK:
					if (wco->polname != NULL)
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
										wco->polname, wco->relname)));
					else
						ereport(ERROR,
								(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
								 errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
										wco->relname)));
					break;
				default:
					elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
					break;
			}
		}
	}
}
2217 :
/*
 * ExecBuildSlotValueDescription -- construct a string representing a tuple
 *
 * This is intentionally very similar to BuildIndexValueDescription, but
 * unlike that function, we truncate long field values (to at most maxfieldlen
 * bytes).  That seems necessary here since heap field values could be very
 * long, whereas index entries typically aren't so wide.
 *
 * Also, unlike the case with index entries, we need to be prepared to ignore
 * dropped columns.  We used to use the slot's tuple descriptor to decode the
 * data, but the slot's descriptor doesn't identify dropped columns, so we
 * now need to be passed the relation's descriptor.
 *
 * Note that, like BuildIndexValueDescription, if the user does not have
 * permission to view any of the columns involved, a NULL is returned.  Unlike
 * BuildIndexValueDescription, if the user has access to view a subset of the
 * column involved, that subset will be returned with a key identifying which
 * columns they are.
 *
 * NULL is also returned if RLS is in effect for the relation, since we
 * cannot tell whether the user is entitled to see the row.
 */
static char *
ExecBuildSlotValueDescription(Oid reloid,
							  TupleTableSlot *slot,
							  TupleDesc tupdesc,
							  Bitmapset *modifiedCols,
							  int maxfieldlen)
{
	StringInfoData buf;
	StringInfoData collist;
	bool		write_comma = false;
	bool		write_comma_collist = false;
	int			i;
	AclResult	aclresult;
	bool		table_perm = false;
	bool		any_perm = false;

	/*
	 * Check if RLS is enabled and should be active for the relation; if so,
	 * then don't return anything.  Otherwise, go through normal permission
	 * checks.
	 */
	if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED)
		return NULL;

	initStringInfo(&buf);

	appendStringInfoChar(&buf, '(');

	/*
	 * Check if the user has permissions to see the row.  Table-level SELECT
	 * allows access to all columns.  If the user does not have table-level
	 * SELECT then we check each column and include those the user has SELECT
	 * rights on.  Additionally, we always include columns the user provided
	 * data for.
	 */
	aclresult = pg_class_aclcheck(reloid, GetUserId(), ACL_SELECT);
	if (aclresult != ACLCHECK_OK)
	{
		/* Set up the buffer for the column list */
		initStringInfo(&collist);
		appendStringInfoChar(&collist, '(');
	}
	else
		table_perm = any_perm = true;

	/* Make sure the tuple is fully deconstructed */
	slot_getallattrs(slot);

	for (i = 0; i < tupdesc->natts; i++)
	{
		bool		column_perm = false;
		char	   *val;
		int			vallen;
		Form_pg_attribute att = TupleDescAttr(tupdesc, i);

		/* ignore dropped columns */
		if (att->attisdropped)
			continue;

		if (!table_perm)
		{
			/*
			 * No table-level SELECT, so need to make sure they either have
			 * SELECT rights on the column or that they have provided the data
			 * for the column.  If not, omit this column from the error
			 * message.
			 */
			aclresult = pg_attribute_aclcheck(reloid, att->attnum,
											  GetUserId(), ACL_SELECT);
			/* modifiedCols uses offsets shifted by FirstLowInvalidHeapAttributeNumber */
			if (bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
							  modifiedCols) || aclresult == ACLCHECK_OK)
			{
				column_perm = any_perm = true;

				if (write_comma_collist)
					appendStringInfoString(&collist, ", ");
				else
					write_comma_collist = true;

				appendStringInfoString(&collist, NameStr(att->attname));
			}
		}

		if (table_perm || column_perm)
		{
			if (slot->tts_isnull[i])
				val = "null";
			else
			{
				Oid			foutoid;
				bool		typisvarlena;

				/* Render the datum via its type's output function */
				getTypeOutputInfo(att->atttypid,
								  &foutoid, &typisvarlena);
				val = OidOutputFunctionCall(foutoid, slot->tts_values[i]);
			}

			if (write_comma)
				appendStringInfoString(&buf, ", ");
			else
				write_comma = true;

			/* truncate if needed, respecting multibyte character boundaries */
			vallen = strlen(val);
			if (vallen <= maxfieldlen)
				appendBinaryStringInfo(&buf, val, vallen);
			else
			{
				vallen = pg_mbcliplen(val, vallen, maxfieldlen);
				appendBinaryStringInfo(&buf, val, vallen);
				appendStringInfoString(&buf, "...");
			}
		}
	}

	/* If we end up with zero columns being returned, then return NULL. */
	if (!any_perm)
		return NULL;

	appendStringInfoChar(&buf, ')');

	if (!table_perm)
	{
		/* Prefix the values with the list of columns they belong to */
		appendStringInfoString(&collist, ") = ");
		appendBinaryStringInfo(&collist, buf.data, buf.len);

		return collist.data;
	}

	return buf.data;
}
2368 :
2369 :
2370 : /*
2371 : * ExecUpdateLockMode -- find the appropriate UPDATE tuple lock mode for a
2372 : * given ResultRelInfo
2373 : */
2374 : LockTupleMode
2375 7810 : ExecUpdateLockMode(EState *estate, ResultRelInfo *relinfo)
2376 : {
2377 : Bitmapset *keyCols;
2378 : Bitmapset *updatedCols;
2379 :
2380 : /*
2381 : * Compute lock mode to use. If columns that are part of the key have not
2382 : * been modified, then we can use a weaker lock, allowing for better
2383 : * concurrency.
2384 : */
2385 7810 : updatedCols = ExecGetAllUpdatedCols(relinfo, estate);
2386 7810 : keyCols = RelationGetIndexAttrBitmap(relinfo->ri_RelationDesc,
2387 : INDEX_ATTR_BITMAP_KEY);
2388 :
2389 7810 : if (bms_overlap(keyCols, updatedCols))
2390 250 : return LockTupleExclusive;
2391 :
2392 7560 : return LockTupleNoKeyExclusive;
2393 : }
2394 :
2395 : /*
2396 : * ExecFindRowMark -- find the ExecRowMark struct for given rangetable index
2397 : *
2398 : * If no such struct, either return NULL or throw error depending on missing_ok
2399 : */
2400 : ExecRowMark *
2401 9450 : ExecFindRowMark(EState *estate, Index rti, bool missing_ok)
2402 : {
2403 9450 : if (rti > 0 && rti <= estate->es_range_table_size &&
2404 9450 : estate->es_rowmarks != NULL)
2405 : {
2406 9450 : ExecRowMark *erm = estate->es_rowmarks[rti - 1];
2407 :
2408 9450 : if (erm)
2409 9450 : return erm;
2410 : }
2411 0 : if (!missing_ok)
2412 0 : elog(ERROR, "failed to find ExecRowMark for rangetable index %u", rti);
2413 0 : return NULL;
2414 : }
2415 :
2416 : /*
2417 : * ExecBuildAuxRowMark -- create an ExecAuxRowMark struct
2418 : *
2419 : * Inputs are the underlying ExecRowMark struct and the targetlist of the
2420 : * input plan node (not planstate node!). We need the latter to find out
2421 : * the column numbers of the resjunk columns.
2422 : */
2423 : ExecAuxRowMark *
2424 9450 : ExecBuildAuxRowMark(ExecRowMark *erm, List *targetlist)
2425 : {
2426 9450 : ExecAuxRowMark *aerm = (ExecAuxRowMark *) palloc0(sizeof(ExecAuxRowMark));
2427 : char resname[32];
2428 :
2429 9450 : aerm->rowmark = erm;
2430 :
2431 : /* Look up the resjunk columns associated with this rowmark */
2432 9450 : if (erm->markType != ROW_MARK_COPY)
2433 : {
2434 : /* need ctid for all methods other than COPY */
2435 9014 : snprintf(resname, sizeof(resname), "ctid%u", erm->rowmarkId);
2436 9014 : aerm->ctidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2437 : resname);
2438 9014 : if (!AttributeNumberIsValid(aerm->ctidAttNo))
2439 0 : elog(ERROR, "could not find junk %s column", resname);
2440 : }
2441 : else
2442 : {
2443 : /* need wholerow if COPY */
2444 436 : snprintf(resname, sizeof(resname), "wholerow%u", erm->rowmarkId);
2445 436 : aerm->wholeAttNo = ExecFindJunkAttributeInTlist(targetlist,
2446 : resname);
2447 436 : if (!AttributeNumberIsValid(aerm->wholeAttNo))
2448 0 : elog(ERROR, "could not find junk %s column", resname);
2449 : }
2450 :
2451 : /* if child rel, need tableoid */
2452 9450 : if (erm->rti != erm->prti)
2453 : {
2454 1540 : snprintf(resname, sizeof(resname), "tableoid%u", erm->rowmarkId);
2455 1540 : aerm->toidAttNo = ExecFindJunkAttributeInTlist(targetlist,
2456 : resname);
2457 1540 : if (!AttributeNumberIsValid(aerm->toidAttNo))
2458 0 : elog(ERROR, "could not find junk %s column", resname);
2459 : }
2460 :
2461 9450 : return aerm;
2462 : }
2463 :
2464 :
2465 : /*
2466 : * EvalPlanQual logic --- recheck modified tuple(s) to see if we want to
2467 : * process the updated version under READ COMMITTED rules.
2468 : *
2469 : * See backend/executor/README for some info about how this works.
2470 : */
2471 :
2472 :
2473 : /*
2474 : * Check the updated version of a tuple to see if we want to process it under
2475 : * READ COMMITTED rules.
2476 : *
2477 : * epqstate - state for EvalPlanQual rechecking
2478 : * relation - table containing tuple
2479 : * rti - rangetable index of table containing tuple
2480 : * inputslot - tuple for processing - this can be the slot from
2481 : * EvalPlanQualSlot() for this rel, for increased efficiency.
2482 : *
2483 : * This tests whether the tuple in inputslot still matches the relevant
2484 : * quals. For that result to be useful, typically the input tuple has to be
2485 : * last row version (otherwise the result isn't particularly useful) and
2486 : * locked (otherwise the result might be out of date). That's typically
2487 : * achieved by using table_tuple_lock() with the
2488 : * TUPLE_LOCK_FLAG_FIND_LAST_VERSION flag.
2489 : *
2490 : * Returns a slot containing the new candidate update/delete tuple, or
2491 : * NULL if we determine we shouldn't process the row.
2492 : */
2493 : TupleTableSlot *
2494 254 : EvalPlanQual(EPQState *epqstate, Relation relation,
2495 : Index rti, TupleTableSlot *inputslot)
2496 : {
2497 : TupleTableSlot *slot;
2498 : TupleTableSlot *testslot;
2499 :
2500 : Assert(rti > 0);
2501 :
2502 : /*
2503 : * Need to run a recheck subquery. Initialize or reinitialize EPQ state.
2504 : */
2505 254 : EvalPlanQualBegin(epqstate);
2506 :
2507 : /*
2508 : * Callers will often use the EvalPlanQualSlot to store the tuple to avoid
2509 : * an unnecessary copy.
2510 : */
2511 254 : testslot = EvalPlanQualSlot(epqstate, relation, rti);
2512 254 : if (testslot != inputslot)
2513 12 : ExecCopySlot(testslot, inputslot);
2514 :
2515 : /*
2516 : * Mark that an EPQ tuple is available for this relation. (If there is
2517 : * more than one result relation, the others remain marked as having no
2518 : * tuple available.)
2519 : */
2520 254 : epqstate->relsubs_done[rti - 1] = false;
2521 254 : epqstate->relsubs_blocked[rti - 1] = false;
2522 :
2523 : /*
2524 : * Run the EPQ query. We assume it will return at most one tuple.
2525 : */
2526 254 : slot = EvalPlanQualNext(epqstate);
2527 :
2528 : /*
2529 : * If we got a tuple, force the slot to materialize the tuple so that it
2530 : * is not dependent on any local state in the EPQ query (in particular,
2531 : * it's highly likely that the slot contains references to any pass-by-ref
2532 : * datums that may be present in copyTuple). As with the next step, this
2533 : * is to guard against early re-use of the EPQ query.
2534 : */
2535 254 : if (!TupIsNull(slot))
2536 188 : ExecMaterializeSlot(slot);
2537 :
2538 : /*
2539 : * Clear out the test tuple, and mark that no tuple is available here.
2540 : * This is needed in case the EPQ state is re-used to test a tuple for a
2541 : * different target relation.
2542 : */
2543 254 : ExecClearTuple(testslot);
2544 254 : epqstate->relsubs_blocked[rti - 1] = true;
2545 :
2546 254 : return slot;
2547 : }
2548 :
2549 : /*
2550 : * EvalPlanQualInit -- initialize during creation of a plan state node
2551 : * that might need to invoke EPQ processing.
2552 : *
2553 : * If the caller intends to use EvalPlanQual(), resultRelations should be
2554 : * a list of RT indexes of potential target relations for EvalPlanQual(),
2555 : * and we will arrange that the other listed relations don't return any
2556 : * tuple during an EvalPlanQual() call. Otherwise resultRelations
2557 : * should be NIL.
2558 : *
2559 : * Note: subplan/auxrowmarks can be NULL/NIL if they will be set later
2560 : * with EvalPlanQualSetPlan.
2561 : */
2562 : void
2563 254464 : EvalPlanQualInit(EPQState *epqstate, EState *parentestate,
2564 : Plan *subplan, List *auxrowmarks,
2565 : int epqParam, List *resultRelations)
2566 : {
2567 254464 : Index rtsize = parentestate->es_range_table_size;
2568 :
2569 : /* initialize data not changing over EPQState's lifetime */
2570 254464 : epqstate->parentestate = parentestate;
2571 254464 : epqstate->epqParam = epqParam;
2572 254464 : epqstate->resultRelations = resultRelations;
2573 :
2574 : /*
2575 : * Allocate space to reference a slot for each potential rti - do so now
2576 : * rather than in EvalPlanQualBegin(), as done for other dynamically
2577 : * allocated resources, so EvalPlanQualSlot() can be used to hold tuples
2578 : * that *may* need EPQ later, without forcing the overhead of
2579 : * EvalPlanQualBegin().
2580 : */
2581 254464 : epqstate->tuple_table = NIL;
2582 254464 : epqstate->relsubs_slot = (TupleTableSlot **)
2583 254464 : palloc0(rtsize * sizeof(TupleTableSlot *));
2584 :
2585 : /* ... and remember data that EvalPlanQualBegin will need */
2586 254464 : epqstate->plan = subplan;
2587 254464 : epqstate->arowMarks = auxrowmarks;
2588 :
2589 : /* ... and mark the EPQ state inactive */
2590 254464 : epqstate->origslot = NULL;
2591 254464 : epqstate->recheckestate = NULL;
2592 254464 : epqstate->recheckplanstate = NULL;
2593 254464 : epqstate->relsubs_rowmark = NULL;
2594 254464 : epqstate->relsubs_done = NULL;
2595 254464 : epqstate->relsubs_blocked = NULL;
2596 254464 : }
2597 :
2598 : /*
2599 : * EvalPlanQualSetPlan -- set or change subplan of an EPQState.
2600 : *
2601 : * We used to need this so that ModifyTable could deal with multiple subplans.
2602 : * It could now be refactored out of existence.
2603 : */
2604 : void
2605 102734 : EvalPlanQualSetPlan(EPQState *epqstate, Plan *subplan, List *auxrowmarks)
2606 : {
2607 : /* If we have a live EPQ query, shut it down */
2608 102734 : EvalPlanQualEnd(epqstate);
2609 : /* And set/change the plan pointer */
2610 102734 : epqstate->plan = subplan;
2611 : /* The rowmarks depend on the plan, too */
2612 102734 : epqstate->arowMarks = auxrowmarks;
2613 102734 : }
2614 :
2615 : /*
2616 : * Return, and create if necessary, a slot for an EPQ test tuple.
2617 : *
2618 : * Note this only requires EvalPlanQualInit() to have been called,
2619 : * EvalPlanQualBegin() is not necessary.
2620 : */
2621 : TupleTableSlot *
2622 12454 : EvalPlanQualSlot(EPQState *epqstate,
2623 : Relation relation, Index rti)
2624 : {
2625 : TupleTableSlot **slot;
2626 :
2627 : Assert(relation);
2628 : Assert(rti > 0 && rti <= epqstate->parentestate->es_range_table_size);
2629 12454 : slot = &epqstate->relsubs_slot[rti - 1];
2630 :
2631 12454 : if (*slot == NULL)
2632 : {
2633 : MemoryContext oldcontext;
2634 :
2635 5306 : oldcontext = MemoryContextSwitchTo(epqstate->parentestate->es_query_cxt);
2636 5306 : *slot = table_slot_create(relation, &epqstate->tuple_table);
2637 5306 : MemoryContextSwitchTo(oldcontext);
2638 : }
2639 :
2640 12454 : return *slot;
2641 : }
2642 :
2643 : /*
2644 : * Fetch the current row value for a non-locked relation, identified by rti,
2645 : * that needs to be scanned by an EvalPlanQual operation. origslot must have
2646 : * been set to contain the current result row (top-level row) that we need to
2647 : * recheck. Returns true if a substitution tuple was found, false if not.
2648 : */
2649 : bool
2650 32 : EvalPlanQualFetchRowMark(EPQState *epqstate, Index rti, TupleTableSlot *slot)
2651 : {
2652 32 : ExecAuxRowMark *earm = epqstate->relsubs_rowmark[rti - 1];
2653 32 : ExecRowMark *erm = earm->rowmark;
2654 : Datum datum;
2655 : bool isNull;
2656 :
2657 : Assert(earm != NULL);
2658 : Assert(epqstate->origslot != NULL);
2659 :
2660 32 : if (RowMarkRequiresRowShareLock(erm->markType))
2661 0 : elog(ERROR, "EvalPlanQual doesn't support locking rowmarks");
2662 :
2663 : /* if child rel, must check whether it produced this row */
2664 32 : if (erm->rti != erm->prti)
2665 : {
2666 : Oid tableoid;
2667 :
2668 0 : datum = ExecGetJunkAttribute(epqstate->origslot,
2669 0 : earm->toidAttNo,
2670 : &isNull);
2671 : /* non-locked rels could be on the inside of outer joins */
2672 0 : if (isNull)
2673 0 : return false;
2674 :
2675 0 : tableoid = DatumGetObjectId(datum);
2676 :
2677 : Assert(OidIsValid(erm->relid));
2678 0 : if (tableoid != erm->relid)
2679 : {
2680 : /* this child is inactive right now */
2681 0 : return false;
2682 : }
2683 : }
2684 :
2685 32 : if (erm->markType == ROW_MARK_REFERENCE)
2686 : {
2687 : Assert(erm->relation != NULL);
2688 :
2689 : /* fetch the tuple's ctid */
2690 26 : datum = ExecGetJunkAttribute(epqstate->origslot,
2691 26 : earm->ctidAttNo,
2692 : &isNull);
2693 : /* non-locked rels could be on the inside of outer joins */
2694 26 : if (isNull)
2695 0 : return false;
2696 :
2697 : /* fetch requests on foreign tables must be passed to their FDW */
2698 26 : if (erm->relation->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
2699 : {
2700 : FdwRoutine *fdwroutine;
2701 0 : bool updated = false;
2702 :
2703 0 : fdwroutine = GetFdwRoutineForRelation(erm->relation, false);
2704 : /* this should have been checked already, but let's be safe */
2705 0 : if (fdwroutine->RefetchForeignRow == NULL)
2706 0 : ereport(ERROR,
2707 : (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
2708 : errmsg("cannot lock rows in foreign table \"%s\"",
2709 : RelationGetRelationName(erm->relation))));
2710 :
2711 0 : fdwroutine->RefetchForeignRow(epqstate->recheckestate,
2712 : erm,
2713 : datum,
2714 : slot,
2715 : &updated);
2716 0 : if (TupIsNull(slot))
2717 0 : elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2718 :
2719 : /*
2720 : * Ideally we'd insist on updated == false here, but that assumes
2721 : * that FDWs can track that exactly, which they might not be able
2722 : * to. So just ignore the flag.
2723 : */
2724 0 : return true;
2725 : }
2726 : else
2727 : {
2728 : /* ordinary table, fetch the tuple */
2729 26 : if (!table_tuple_fetch_row_version(erm->relation,
2730 26 : (ItemPointer) DatumGetPointer(datum),
2731 : SnapshotAny, slot))
2732 0 : elog(ERROR, "failed to fetch tuple for EvalPlanQual recheck");
2733 26 : return true;
2734 : }
2735 : }
2736 : else
2737 : {
2738 : Assert(erm->markType == ROW_MARK_COPY);
2739 :
2740 : /* fetch the whole-row Var for the relation */
2741 6 : datum = ExecGetJunkAttribute(epqstate->origslot,
2742 6 : earm->wholeAttNo,
2743 : &isNull);
2744 : /* non-locked rels could be on the inside of outer joins */
2745 6 : if (isNull)
2746 0 : return false;
2747 :
2748 6 : ExecStoreHeapTupleDatum(datum, slot);
2749 6 : return true;
2750 : }
2751 : }
2752 :
2753 : /*
2754 : * Fetch the next row (if any) from EvalPlanQual testing
2755 : *
2756 : * (In practice, there should never be more than one row...)
2757 : */
2758 : TupleTableSlot *
2759 308 : EvalPlanQualNext(EPQState *epqstate)
2760 : {
2761 : MemoryContext oldcontext;
2762 : TupleTableSlot *slot;
2763 :
2764 308 : oldcontext = MemoryContextSwitchTo(epqstate->recheckestate->es_query_cxt);
2765 308 : slot = ExecProcNode(epqstate->recheckplanstate);
2766 308 : MemoryContextSwitchTo(oldcontext);
2767 :
2768 308 : return slot;
2769 : }
2770 :
2771 : /*
2772 : * Initialize or reset an EvalPlanQual state tree
2773 : */
2774 : void
2775 362 : EvalPlanQualBegin(EPQState *epqstate)
2776 : {
2777 362 : EState *parentestate = epqstate->parentestate;
2778 362 : EState *recheckestate = epqstate->recheckestate;
2779 :
2780 362 : if (recheckestate == NULL)
2781 : {
2782 : /* First time through, so create a child EState */
2783 220 : EvalPlanQualStart(epqstate, epqstate->plan);
2784 : }
2785 : else
2786 : {
2787 : /*
2788 : * We already have a suitable child EPQ tree, so just reset it.
2789 : */
2790 142 : Index rtsize = parentestate->es_range_table_size;
2791 142 : PlanState *rcplanstate = epqstate->recheckplanstate;
2792 :
2793 : /*
2794 : * Reset the relsubs_done[] flags to equal relsubs_blocked[], so that
2795 : * the EPQ run will never attempt to fetch tuples from blocked target
2796 : * relations.
2797 : */
2798 142 : memcpy(epqstate->relsubs_done, epqstate->relsubs_blocked,
2799 : rtsize * sizeof(bool));
2800 :
2801 : /* Recopy current values of parent parameters */
2802 142 : if (parentestate->es_plannedstmt->paramExecTypes != NIL)
2803 : {
2804 : int i;
2805 :
2806 : /*
2807 : * Force evaluation of any InitPlan outputs that could be needed
2808 : * by the subplan, just in case they got reset since
2809 : * EvalPlanQualStart (see comments therein).
2810 : */
2811 142 : ExecSetParamPlanMulti(rcplanstate->plan->extParam,
2812 142 : GetPerTupleExprContext(parentestate));
2813 :
2814 142 : i = list_length(parentestate->es_plannedstmt->paramExecTypes);
2815 :
2816 302 : while (--i >= 0)
2817 : {
2818 : /* copy value if any, but not execPlan link */
2819 160 : recheckestate->es_param_exec_vals[i].value =
2820 160 : parentestate->es_param_exec_vals[i].value;
2821 160 : recheckestate->es_param_exec_vals[i].isnull =
2822 160 : parentestate->es_param_exec_vals[i].isnull;
2823 : }
2824 : }
2825 :
2826 : /*
2827 : * Mark child plan tree as needing rescan at all scan nodes. The
2828 : * first ExecProcNode will take care of actually doing the rescan.
2829 : */
2830 142 : rcplanstate->chgParam = bms_add_member(rcplanstate->chgParam,
2831 : epqstate->epqParam);
2832 : }
2833 362 : }
2834 :
/*
 * Start execution of an EvalPlanQual plan tree.
 *
 * This is a cut-down version of ExecutorStart(): we copy some state from
 * the top-level estate rather than initializing it fresh.
 *
 * Note the initialization order below matters: subplan states and the
 * relsubs_* arrays must be set up before ExecInitNode() is run on the main
 * plan tree, since node initialization consults them.
 */
static void
EvalPlanQualStart(EPQState *epqstate, Plan *planTree)
{
	EState	   *parentestate = epqstate->parentestate;
	Index		rtsize = parentestate->es_range_table_size;
	EState	   *rcestate;
	MemoryContext oldcontext;
	ListCell   *l;

	epqstate->recheckestate = rcestate = CreateExecutorState();

	/* All child-EState allocations live in its own per-query context. */
	oldcontext = MemoryContextSwitchTo(rcestate->es_query_cxt);

	/* signal that this is an EState for executing EPQ */
	rcestate->es_epq_active = epqstate;

	/*
	 * Child EPQ EStates share the parent's copy of unchanging state such as
	 * the snapshot, rangetable, and external Param info.  They need their own
	 * copies of local state, including a tuple table, es_param_exec_vals,
	 * result-rel info, etc.
	 */
	rcestate->es_direction = ForwardScanDirection;
	rcestate->es_snapshot = parentestate->es_snapshot;
	rcestate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot;
	rcestate->es_range_table = parentestate->es_range_table;
	rcestate->es_range_table_size = parentestate->es_range_table_size;
	rcestate->es_relations = parentestate->es_relations;
	rcestate->es_rowmarks = parentestate->es_rowmarks;
	rcestate->es_rteperminfos = parentestate->es_rteperminfos;
	rcestate->es_plannedstmt = parentestate->es_plannedstmt;
	rcestate->es_junkFilter = parentestate->es_junkFilter;
	rcestate->es_output_cid = parentestate->es_output_cid;
	rcestate->es_queryEnv = parentestate->es_queryEnv;

	/*
	 * ResultRelInfos needed by subplans are initialized from scratch when the
	 * subplans themselves are initialized.
	 */
	rcestate->es_result_relations = NULL;
	/* es_trig_target_relations must NOT be copied */
	rcestate->es_top_eflags = parentestate->es_top_eflags;
	rcestate->es_instrument = parentestate->es_instrument;
	/* es_auxmodifytables must NOT be copied */

	/*
	 * The external param list is simply shared from parent.  The internal
	 * param workspace has to be local state, but we copy the initial values
	 * from the parent, so as to have access to any param values that were
	 * already set from other parts of the parent's plan tree.
	 */
	rcestate->es_param_list_info = parentestate->es_param_list_info;
	if (parentestate->es_plannedstmt->paramExecTypes != NIL)
	{
		int			i;

		/*
		 * Force evaluation of any InitPlan outputs that could be needed by
		 * the subplan.  (With more complexity, maybe we could postpone this
		 * till the subplan actually demands them, but it doesn't seem worth
		 * the trouble; this is a corner case already, since usually the
		 * InitPlans would have been evaluated before reaching EvalPlanQual.)
		 *
		 * This will not touch output params of InitPlans that occur somewhere
		 * within the subplan tree, only those that are attached to the
		 * ModifyTable node or above it and are referenced within the subplan.
		 * That's OK though, because the planner would only attach such
		 * InitPlans to a lower-level SubqueryScan node, and EPQ execution
		 * will not descend into a SubqueryScan.
		 *
		 * The EState's per-output-tuple econtext is sufficiently short-lived
		 * for this, since it should get reset before there is any chance of
		 * doing EvalPlanQual again.
		 */
		ExecSetParamPlanMulti(planTree->extParam,
							  GetPerTupleExprContext(parentestate));

		/* now make the internal param workspace ... */
		i = list_length(parentestate->es_plannedstmt->paramExecTypes);
		rcestate->es_param_exec_vals = (ParamExecData *)
			palloc0(i * sizeof(ParamExecData));
		/* ... and copy down all values, whether really needed or not */
		while (--i >= 0)
		{
			/* copy value if any, but not execPlan link */
			rcestate->es_param_exec_vals[i].value =
				parentestate->es_param_exec_vals[i].value;
			rcestate->es_param_exec_vals[i].isnull =
				parentestate->es_param_exec_vals[i].isnull;
		}
	}

	/*
	 * Initialize private state information for each SubPlan.  We must do this
	 * before running ExecInitNode on the main query tree, since
	 * ExecInitSubPlan expects to be able to find these entries.  Some of the
	 * SubPlans might not be used in the part of the plan tree we intend to
	 * run, but since it's not easy to tell which, we just initialize them
	 * all.
	 */
	Assert(rcestate->es_subplanstates == NIL);
	foreach(l, parentestate->es_plannedstmt->subplans)
	{
		Plan	   *subplan = (Plan *) lfirst(l);
		PlanState  *subplanstate;

		subplanstate = ExecInitNode(subplan, rcestate, 0);
		rcestate->es_subplanstates = lappend(rcestate->es_subplanstates,
											 subplanstate);
	}

	/*
	 * Build an RTI indexed array of rowmarks, so that
	 * EvalPlanQualFetchRowMark() can efficiently access the to be fetched
	 * rowmark.  palloc0 leaves entries for relations without a rowmark NULL.
	 */
	epqstate->relsubs_rowmark = (ExecAuxRowMark **)
		palloc0(rtsize * sizeof(ExecAuxRowMark *));
	foreach(l, epqstate->arowMarks)
	{
		ExecAuxRowMark *earm = (ExecAuxRowMark *) lfirst(l);

		epqstate->relsubs_rowmark[earm->rowmark->rti - 1] = earm;
	}

	/*
	 * Initialize per-relation EPQ tuple states.  Result relations, if any,
	 * get marked as blocked; others as not-fetched.
	 */
	epqstate->relsubs_done = palloc_array(bool, rtsize);
	epqstate->relsubs_blocked = palloc0_array(bool, rtsize);

	foreach(l, epqstate->resultRelations)
	{
		int			rtindex = lfirst_int(l);

		Assert(rtindex > 0 && rtindex <= rtsize);
		epqstate->relsubs_blocked[rtindex - 1] = true;
	}

	/* relsubs_done starts out equal to relsubs_blocked */
	memcpy(epqstate->relsubs_done, epqstate->relsubs_blocked,
		   rtsize * sizeof(bool));

	/*
	 * Initialize the private state information for all the nodes in the part
	 * of the plan tree we need to run.  This opens files, allocates storage
	 * and leaves us ready to start processing tuples.
	 */
	epqstate->recheckplanstate = ExecInitNode(planTree, rcestate, 0);

	MemoryContextSwitchTo(oldcontext);
}
2993 :
2994 : /*
2995 : * EvalPlanQualEnd -- shut down at termination of parent plan state node,
2996 : * or if we are done with the current EPQ child.
2997 : *
2998 : * This is a cut-down version of ExecutorEnd(); basically we want to do most
2999 : * of the normal cleanup, but *not* close result relations (which we are
3000 : * just sharing from the outer query). We do, however, have to close any
3001 : * result and trigger target relations that got opened, since those are not
3002 : * shared. (There probably shouldn't be any of the latter, but just in
3003 : * case...)
3004 : */
3005 : void
3006 355600 : EvalPlanQualEnd(EPQState *epqstate)
3007 : {
3008 355600 : EState *estate = epqstate->recheckestate;
3009 : Index rtsize;
3010 : MemoryContext oldcontext;
3011 : ListCell *l;
3012 :
3013 355600 : rtsize = epqstate->parentestate->es_range_table_size;
3014 :
3015 : /*
3016 : * We may have a tuple table, even if EPQ wasn't started, because we allow
3017 : * use of EvalPlanQualSlot() without calling EvalPlanQualBegin().
3018 : */
3019 355600 : if (epqstate->tuple_table != NIL)
3020 : {
3021 5124 : memset(epqstate->relsubs_slot, 0,
3022 : rtsize * sizeof(TupleTableSlot *));
3023 5124 : ExecResetTupleTable(epqstate->tuple_table, true);
3024 5124 : epqstate->tuple_table = NIL;
3025 : }
3026 :
3027 : /* EPQ wasn't started, nothing further to do */
3028 355600 : if (estate == NULL)
3029 355390 : return;
3030 :
3031 210 : oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
3032 :
3033 210 : ExecEndNode(epqstate->recheckplanstate);
3034 :
3035 260 : foreach(l, estate->es_subplanstates)
3036 : {
3037 50 : PlanState *subplanstate = (PlanState *) lfirst(l);
3038 :
3039 50 : ExecEndNode(subplanstate);
3040 : }
3041 :
3042 : /* throw away the per-estate tuple table, some node may have used it */
3043 210 : ExecResetTupleTable(estate->es_tupleTable, false);
3044 :
3045 : /* Close any result and trigger target relations attached to this EState */
3046 210 : ExecCloseResultRelations(estate);
3047 :
3048 210 : MemoryContextSwitchTo(oldcontext);
3049 :
3050 210 : FreeExecutorState(estate);
3051 :
3052 : /* Mark EPQState idle */
3053 210 : epqstate->origslot = NULL;
3054 210 : epqstate->recheckestate = NULL;
3055 210 : epqstate->recheckplanstate = NULL;
3056 210 : epqstate->relsubs_rowmark = NULL;
3057 210 : epqstate->relsubs_done = NULL;
3058 210 : epqstate->relsubs_blocked = NULL;
3059 : }
|